Fix typo in arm_mve.h __arm_vcmpneq_s8 return type
[gcc.git] / gcc / config / arm / arm_mve.h
1 /* Arm MVE intrinsics include file.
2
3 Copyright (C) 2019-2021 Free Software Foundation, Inc.
4 Contributed by Arm.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #ifndef _GCC_ARM_MVE_H
23 #define _GCC_ARM_MVE_H
24
25 #if __ARM_BIG_ENDIAN
26 #error "MVE intrinsics are not supported in Big-Endian mode."
27 #elif !__ARM_FEATURE_MVE
28 #error "MVE feature not supported"
29 #else
30
31 #include <stdint.h>
32 #ifndef __cplusplus
33 #include <stdbool.h>
34 #endif
35 #include "arm_mve_types.h"
36
37 #ifndef __ARM_MVE_PRESERVE_USER_NAMESPACE
/* User-namespace aliases: each short intrinsic name expands to its
   __arm_-prefixed polymorphic implementation.  Suffix meanings follow
   the Arm ACLE MVE naming convention (_n: scalar/immediate operand,
   _r: shift amount in a register, _p: predicated with predicate __p) —
   NOTE(review): semantics per ACLE; confirm against the ACLE spec.  */
/* Unary operations, widening moves, reversals and reductions.  */
38 #define vst4q(__addr, __value) __arm_vst4q(__addr, __value)
39 #define vdupq_n(__a) __arm_vdupq_n(__a)
40 #define vabsq(__a) __arm_vabsq(__a)
41 #define vclsq(__a) __arm_vclsq(__a)
42 #define vclzq(__a) __arm_vclzq(__a)
43 #define vnegq(__a) __arm_vnegq(__a)
44 #define vaddlvq(__a) __arm_vaddlvq(__a)
45 #define vaddvq(__a) __arm_vaddvq(__a)
46 #define vmovlbq(__a) __arm_vmovlbq(__a)
47 #define vmovltq(__a) __arm_vmovltq(__a)
48 #define vmvnq(__a) __arm_vmvnq(__a)
49 #define vrev16q(__a) __arm_vrev16q(__a)
50 #define vrev32q(__a) __arm_vrev32q(__a)
51 #define vrev64q(__a) __arm_vrev64q(__a)
52 #define vqabsq(__a) __arm_vqabsq(__a)
53 #define vqnegq(__a) __arm_vqnegq(__a)
54 #define vshrq(__a, __imm) __arm_vshrq(__a, __imm)
55 #define vaddlvq_p(__a, __p) __arm_vaddlvq_p(__a, __p)
/* Element-wise binary operations, comparisons and cross-lane
   reductions taking two operands.  */
56 #define vcmpneq(__a, __b) __arm_vcmpneq(__a, __b)
57 #define vshlq(__a, __b) __arm_vshlq(__a, __b)
58 #define vsubq(__a, __b) __arm_vsubq(__a, __b)
59 #define vrmulhq(__a, __b) __arm_vrmulhq(__a, __b)
60 #define vrhaddq(__a, __b) __arm_vrhaddq(__a, __b)
61 #define vqsubq(__a, __b) __arm_vqsubq(__a, __b)
62 #define vqaddq(__a, __b) __arm_vqaddq(__a, __b)
63 #define vorrq(__a, __b) __arm_vorrq(__a, __b)
64 #define vornq(__a, __b) __arm_vornq(__a, __b)
65 #define vmulq(__a, __b) __arm_vmulq(__a, __b)
66 #define vmulltq_int(__a, __b) __arm_vmulltq_int(__a, __b)
67 #define vmullbq_int(__a, __b) __arm_vmullbq_int(__a, __b)
68 #define vmulhq(__a, __b) __arm_vmulhq(__a, __b)
69 #define vmladavq(__a, __b) __arm_vmladavq(__a, __b)
70 #define vminvq(__a, __b) __arm_vminvq(__a, __b)
71 #define vminq(__a, __b) __arm_vminq(__a, __b)
72 #define vmaxvq(__a, __b) __arm_vmaxvq(__a, __b)
73 #define vmaxq(__a, __b) __arm_vmaxq(__a, __b)
74 #define vhsubq(__a, __b) __arm_vhsubq(__a, __b)
75 #define vhaddq(__a, __b) __arm_vhaddq(__a, __b)
76 #define veorq(__a, __b) __arm_veorq(__a, __b)
77 #define vcmphiq(__a, __b) __arm_vcmphiq(__a, __b)
78 #define vcmpeqq(__a, __b) __arm_vcmpeqq(__a, __b)
79 #define vcmpcsq(__a, __b) __arm_vcmpcsq(__a, __b)
80 #define vcaddq_rot90(__a, __b) __arm_vcaddq_rot90(__a, __b)
81 #define vcaddq_rot270(__a, __b) __arm_vcaddq_rot270(__a, __b)
82 #define vbicq(__a, __b) __arm_vbicq(__a, __b)
83 #define vandq(__a, __b) __arm_vandq(__a, __b)
84 #define vaddvq_p(__a, __p) __arm_vaddvq_p(__a, __p)
85 #define vaddvaq(__a, __b) __arm_vaddvaq(__a, __b)
86 #define vaddq(__a, __b) __arm_vaddq(__a, __b)
87 #define vabdq(__a, __b) __arm_vabdq(__a, __b)
/* Shifts (by vector, register or immediate), saturating arithmetic,
   multiply-accumulate reductions and narrowing/widening moves.  */
88 #define vshlq_r(__a, __b) __arm_vshlq_r(__a, __b)
89 #define vrshlq(__a, __b) __arm_vrshlq(__a, __b)
90 #define vqshlq(__a, __b) __arm_vqshlq(__a, __b)
91 #define vqshlq_r(__a, __b) __arm_vqshlq_r(__a, __b)
92 #define vqrshlq(__a, __b) __arm_vqrshlq(__a, __b)
93 #define vminavq(__a, __b) __arm_vminavq(__a, __b)
94 #define vminaq(__a, __b) __arm_vminaq(__a, __b)
95 #define vmaxavq(__a, __b) __arm_vmaxavq(__a, __b)
96 #define vmaxaq(__a, __b) __arm_vmaxaq(__a, __b)
97 #define vbrsrq(__a, __b) __arm_vbrsrq(__a, __b)
98 #define vshlq_n(__a, __imm) __arm_vshlq_n(__a, __imm)
99 #define vrshrq(__a, __imm) __arm_vrshrq(__a, __imm)
100 #define vqshlq_n(__a, __imm) __arm_vqshlq_n(__a, __imm)
101 #define vcmpltq(__a, __b) __arm_vcmpltq(__a, __b)
102 #define vcmpleq(__a, __b) __arm_vcmpleq(__a, __b)
103 #define vcmpgtq(__a, __b) __arm_vcmpgtq(__a, __b)
104 #define vcmpgeq(__a, __b) __arm_vcmpgeq(__a, __b)
105 #define vqshluq(__a, __imm) __arm_vqshluq(__a, __imm)
106 #define vqrdmulhq(__a, __b) __arm_vqrdmulhq(__a, __b)
107 #define vqdmulhq(__a, __b) __arm_vqdmulhq(__a, __b)
108 #define vmlsdavxq(__a, __b) __arm_vmlsdavxq(__a, __b)
109 #define vmlsdavq(__a, __b) __arm_vmlsdavq(__a, __b)
110 #define vmladavxq(__a, __b) __arm_vmladavxq(__a, __b)
111 #define vhcaddq_rot90(__a, __b) __arm_vhcaddq_rot90(__a, __b)
112 #define vhcaddq_rot270(__a, __b) __arm_vhcaddq_rot270(__a, __b)
113 #define vqmovntq(__a, __b) __arm_vqmovntq(__a, __b)
114 #define vqmovnbq(__a, __b) __arm_vqmovnbq(__a, __b)
115 #define vmulltq_poly(__a, __b) __arm_vmulltq_poly(__a, __b)
116 #define vmullbq_poly(__a, __b) __arm_vmullbq_poly(__a, __b)
117 #define vmovntq(__a, __b) __arm_vmovntq(__a, __b)
118 #define vmovnbq(__a, __b) __arm_vmovnbq(__a, __b)
119 #define vmlaldavq(__a, __b) __arm_vmlaldavq(__a, __b)
120 #define vqmovuntq(__a, __b) __arm_vqmovuntq(__a, __b)
121 #define vqmovunbq(__a, __b) __arm_vqmovunbq(__a, __b)
122 #define vshlltq(__a, __imm) __arm_vshlltq(__a, __imm)
123 #define vshllbq(__a, __imm) __arm_vshllbq(__a, __imm)
124 #define vqdmulltq(__a, __b) __arm_vqdmulltq(__a, __b)
125 #define vqdmullbq(__a, __b) __arm_vqdmullbq(__a, __b)
126 #define vmlsldavxq(__a, __b) __arm_vmlsldavxq(__a, __b)
127 #define vmlsldavq(__a, __b) __arm_vmlsldavq(__a, __b)
128 #define vmlaldavxq(__a, __b) __arm_vmlaldavxq(__a, __b)
129 #define vrmlaldavhq(__a, __b) __arm_vrmlaldavhq(__a, __b)
130 #define vaddlvaq(__a, __b) __arm_vaddlvaq(__a, __b)
131 #define vrmlsldavhxq(__a, __b) __arm_vrmlsldavhxq(__a, __b)
132 #define vrmlsldavhq(__a, __b) __arm_vrmlsldavhq(__a, __b)
133 #define vrmlaldavhxq(__a, __b) __arm_vrmlaldavhxq(__a, __b)
/* Ternary and predicated variants.  Per the ACLE MVE naming
   convention (NOTE(review): confirm against the ACLE spec): _m forms
   merge, taking their inactive-lane values from an __inactive vector
   or the accumulator; _p forms apply the predicate __p to the
   operation/reduction.  */
134 #define vabavq(__a, __b, __c) __arm_vabavq(__a, __b, __c)
135 #define vbicq_m_n(__a, __imm, __p) __arm_vbicq_m_n(__a, __imm, __p)
136 #define vqrshrnbq(__a, __b, __imm) __arm_vqrshrnbq(__a, __b, __imm)
137 #define vqrshrunbq(__a, __b, __imm) __arm_vqrshrunbq(__a, __b, __imm)
138 #define vrmlaldavhaq(__a, __b, __c) __arm_vrmlaldavhaq(__a, __b, __c)
139 #define vshlcq(__a, __b, __imm) __arm_vshlcq(__a, __b, __imm)
140 #define vpselq(__a, __b, __p) __arm_vpselq(__a, __b, __p)
141 #define vrev64q_m(__inactive, __a, __p) __arm_vrev64q_m(__inactive, __a, __p)
142 #define vqrdmlashq(__a, __b, __c) __arm_vqrdmlashq(__a, __b, __c)
143 #define vqrdmlahq(__a, __b, __c) __arm_vqrdmlahq(__a, __b, __c)
144 #define vqdmlashq(__a, __b, __c) __arm_vqdmlashq(__a, __b, __c)
145 #define vqdmlahq(__a, __b, __c) __arm_vqdmlahq(__a, __b, __c)
146 #define vmvnq_m(__inactive, __a, __p) __arm_vmvnq_m(__inactive, __a, __p)
147 #define vmlasq(__a, __b, __c) __arm_vmlasq(__a, __b, __c)
148 #define vmlaq(__a, __b, __c) __arm_vmlaq(__a, __b, __c)
149 #define vmladavq_p(__a, __b, __p) __arm_vmladavq_p(__a, __b, __p)
150 #define vmladavaq(__a, __b, __c) __arm_vmladavaq(__a, __b, __c)
151 #define vminvq_p(__a, __b, __p) __arm_vminvq_p(__a, __b, __p)
152 #define vmaxvq_p(__a, __b, __p) __arm_vmaxvq_p(__a, __b, __p)
153 #define vdupq_m(__inactive, __a, __p) __arm_vdupq_m(__inactive, __a, __p)
/* Predicated comparisons and predicated shift/min/max reductions.  */
154 #define vcmpneq_m(__a, __b, __p) __arm_vcmpneq_m(__a, __b, __p)
155 #define vcmphiq_m(__a, __b, __p) __arm_vcmphiq_m(__a, __b, __p)
156 #define vcmpeqq_m(__a, __b, __p) __arm_vcmpeqq_m(__a, __b, __p)
157 #define vcmpcsq_m(__a, __b, __p) __arm_vcmpcsq_m(__a, __b, __p)
158 #define vcmpcsq_m_n(__a, __b, __p) __arm_vcmpcsq_m_n(__a, __b, __p)
159 #define vclzq_m(__inactive, __a, __p) __arm_vclzq_m(__inactive, __a, __p)
160 #define vaddvaq_p(__a, __b, __p) __arm_vaddvaq_p(__a, __b, __p)
161 #define vsriq(__a, __b, __imm) __arm_vsriq(__a, __b, __imm)
162 #define vsliq(__a, __b, __imm) __arm_vsliq(__a, __b, __imm)
163 #define vshlq_m_r(__a, __b, __p) __arm_vshlq_m_r(__a, __b, __p)
164 #define vrshlq_m_n(__a, __b, __p) __arm_vrshlq_m_n(__a, __b, __p)
165 #define vqshlq_m_r(__a, __b, __p) __arm_vqshlq_m_r(__a, __b, __p)
166 #define vqrshlq_m_n(__a, __b, __p) __arm_vqrshlq_m_n(__a, __b, __p)
167 #define vminavq_p(__a, __b, __p) __arm_vminavq_p(__a, __b, __p)
168 #define vminaq_m(__a, __b, __p) __arm_vminaq_m(__a, __b, __p)
169 #define vmaxavq_p(__a, __b, __p) __arm_vmaxavq_p(__a, __b, __p)
170 #define vmaxaq_m(__a, __b, __p) __arm_vmaxaq_m(__a, __b, __p)
171 #define vcmpltq_m(__a, __b, __p) __arm_vcmpltq_m(__a, __b, __p)
172 #define vcmpleq_m(__a, __b, __p) __arm_vcmpleq_m(__a, __b, __p)
173 #define vcmpgtq_m(__a, __b, __p) __arm_vcmpgtq_m(__a, __b, __p)
174 #define vcmpgeq_m(__a, __b, __p) __arm_vcmpgeq_m(__a, __b, __p)
175 #define vqnegq_m(__inactive, __a, __p) __arm_vqnegq_m(__inactive, __a, __p)
176 #define vqabsq_m(__inactive, __a, __p) __arm_vqabsq_m(__inactive, __a, __p)
177 #define vnegq_m(__inactive, __a, __p) __arm_vnegq_m(__inactive, __a, __p)
178 #define vmlsdavxq_p(__a, __b, __p) __arm_vmlsdavxq_p(__a, __b, __p)
179 #define vmlsdavq_p(__a, __b, __p) __arm_vmlsdavq_p(__a, __b, __p)
180 #define vmladavxq_p(__a, __b, __p) __arm_vmladavxq_p(__a, __b, __p)
181 #define vclsq_m(__inactive, __a, __p) __arm_vclsq_m(__inactive, __a, __p)
182 #define vabsq_m(__inactive, __a, __p) __arm_vabsq_m(__inactive, __a, __p)
/* Saturating doubling multiply-add/subtract families and widening
   multiply-accumulate-across-vector forms.  */
183 #define vqrdmlsdhxq(__inactive, __a, __b) __arm_vqrdmlsdhxq(__inactive, __a, __b)
184 #define vqrdmlsdhq(__inactive, __a, __b) __arm_vqrdmlsdhq(__inactive, __a, __b)
185 #define vqrdmladhxq(__inactive, __a, __b) __arm_vqrdmladhxq(__inactive, __a, __b)
186 #define vqrdmladhq(__inactive, __a, __b) __arm_vqrdmladhq(__inactive, __a, __b)
187 #define vqdmlsdhxq(__inactive, __a, __b) __arm_vqdmlsdhxq(__inactive, __a, __b)
188 #define vqdmlsdhq(__inactive, __a, __b) __arm_vqdmlsdhq(__inactive, __a, __b)
189 #define vqdmladhxq(__inactive, __a, __b) __arm_vqdmladhxq(__inactive, __a, __b)
190 #define vqdmladhq(__inactive, __a, __b) __arm_vqdmladhq(__inactive, __a, __b)
191 #define vmlsdavaxq(__a, __b, __c) __arm_vmlsdavaxq(__a, __b, __c)
192 #define vmlsdavaq(__a, __b, __c) __arm_vmlsdavaq(__a, __b, __c)
193 #define vmladavaxq(__a, __b, __c) __arm_vmladavaxq(__a, __b, __c)
194 #define vrmlaldavhaxq(__a, __b, __c) __arm_vrmlaldavhaxq(__a, __b, __c)
195 #define vrmlsldavhaq(__a, __b, __c) __arm_vrmlsldavhaq(__a, __b, __c)
196 #define vrmlsldavhaxq(__a, __b, __c) __arm_vrmlsldavhaxq(__a, __b, __c)
197 #define vaddlvaq_p(__a, __b, __p) __arm_vaddlvaq_p(__a, __b, __p)
198 #define vrev16q_m(__inactive, __a, __p) __arm_vrev16q_m(__inactive, __a, __p)
199 #define vrmlaldavhq_p(__a, __b, __p) __arm_vrmlaldavhq_p(__a, __b, __p)
200 #define vrmlaldavhxq_p(__a, __b, __p) __arm_vrmlaldavhxq_p(__a, __b, __p)
201 #define vrmlsldavhq_p(__a, __b, __p) __arm_vrmlsldavhq_p(__a, __b, __p)
202 #define vrmlsldavhxq_p(__a, __b, __p) __arm_vrmlsldavhxq_p(__a, __b, __p)
203 #define vorrq_m_n(__a, __imm, __p) __arm_vorrq_m_n(__a, __imm, __p)
/* Narrowing shifts (top/bottom halves) and narrowing moves.  */
204 #define vqrshrntq(__a, __b, __imm) __arm_vqrshrntq(__a, __b, __imm)
205 #define vqshrnbq(__a, __b, __imm) __arm_vqshrnbq(__a, __b, __imm)
206 #define vqshrntq(__a, __b, __imm) __arm_vqshrntq(__a, __b, __imm)
207 #define vrshrnbq(__a, __b, __imm) __arm_vrshrnbq(__a, __b, __imm)
208 #define vrshrntq(__a, __b, __imm) __arm_vrshrntq(__a, __b, __imm)
209 #define vshrnbq(__a, __b, __imm) __arm_vshrnbq(__a, __b, __imm)
210 #define vshrntq(__a, __b, __imm) __arm_vshrntq(__a, __b, __imm)
211 #define vmlaldavaq(__a, __b, __c) __arm_vmlaldavaq(__a, __b, __c)
212 #define vmlaldavaxq(__a, __b, __c) __arm_vmlaldavaxq(__a, __b, __c)
213 #define vmlsldavaq(__a, __b, __c) __arm_vmlsldavaq(__a, __b, __c)
214 #define vmlsldavaxq(__a, __b, __c) __arm_vmlsldavaxq(__a, __b, __c)
215 #define vmlaldavq_p(__a, __b, __p) __arm_vmlaldavq_p(__a, __b, __p)
216 #define vmlaldavxq_p(__a, __b, __p) __arm_vmlaldavxq_p(__a, __b, __p)
217 #define vmlsldavq_p(__a, __b, __p) __arm_vmlsldavq_p(__a, __b, __p)
218 #define vmlsldavxq_p(__a, __b, __p) __arm_vmlsldavxq_p(__a, __b, __p)
219 #define vmovlbq_m(__inactive, __a, __p) __arm_vmovlbq_m(__inactive, __a, __p)
220 #define vmovltq_m(__inactive, __a, __p) __arm_vmovltq_m(__inactive, __a, __p)
221 #define vmovnbq_m(__a, __b, __p) __arm_vmovnbq_m(__a, __b, __p)
222 #define vmovntq_m(__a, __b, __p) __arm_vmovntq_m(__a, __b, __p)
223 #define vqmovnbq_m(__a, __b, __p) __arm_vqmovnbq_m(__a, __b, __p)
224 #define vqmovntq_m(__a, __b, __p) __arm_vqmovntq_m(__a, __b, __p)
225 #define vrev32q_m(__inactive, __a, __p) __arm_vrev32q_m(__inactive, __a, __p)
226 #define vqrshruntq(__a, __b, __imm) __arm_vqrshruntq(__a, __b, __imm)
227 #define vqshrunbq(__a, __b, __imm) __arm_vqshrunbq(__a, __b, __imm)
228 #define vqshruntq(__a, __b, __imm) __arm_vqshruntq(__a, __b, __imm)
229 #define vqmovunbq_m(__a, __b, __p) __arm_vqmovunbq_m(__a, __b, __p)
230 #define vqmovuntq_m(__a, __b, __p) __arm_vqmovuntq_m(__a, __b, __p)
231 #define vsriq_m(__a, __b, __imm, __p) __arm_vsriq_m(__a, __b, __imm, __p)
232 #define vsubq_m(__inactive, __a, __b, __p) __arm_vsubq_m(__inactive, __a, __b, __p)
233 #define vqshluq_m(__inactive, __a, __imm, __p) __arm_vqshluq_m(__inactive, __a, __imm, __p)
234 #define vabavq_p(__a, __b, __c, __p) __arm_vabavq_p(__a, __b, __c, __p)
/* Four-operand merging-predicated (_m) forms: these take an
   __inactive vector as first argument plus the predicate __p
   (presumably __inactive supplies the result for lanes where __p is
   false — per ACLE, confirm against the spec), and predicated (_p)
   accumulating reductions.  */
235 #define vshlq_m(__inactive, __a, __b, __p) __arm_vshlq_m(__inactive, __a, __b, __p)
236 #define vabdq_m(__inactive, __a, __b, __p) __arm_vabdq_m(__inactive, __a, __b, __p)
237 #define vaddq_m(__inactive, __a, __b, __p) __arm_vaddq_m(__inactive, __a, __b, __p)
238 #define vandq_m(__inactive, __a, __b, __p) __arm_vandq_m(__inactive, __a, __b, __p)
239 #define vbicq_m(__inactive, __a, __b, __p) __arm_vbicq_m(__inactive, __a, __b, __p)
240 #define vbrsrq_m(__inactive, __a, __b, __p) __arm_vbrsrq_m(__inactive, __a, __b, __p)
241 #define vcaddq_rot270_m(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m(__inactive, __a, __b, __p)
242 #define vcaddq_rot90_m(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m(__inactive, __a, __b, __p)
243 #define veorq_m(__inactive, __a, __b, __p) __arm_veorq_m(__inactive, __a, __b, __p)
244 #define vhaddq_m(__inactive, __a, __b, __p) __arm_vhaddq_m(__inactive, __a, __b, __p)
245 #define vhcaddq_rot270_m(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m(__inactive, __a, __b, __p)
246 #define vhcaddq_rot90_m(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m(__inactive, __a, __b, __p)
247 #define vhsubq_m(__inactive, __a, __b, __p) __arm_vhsubq_m(__inactive, __a, __b, __p)
248 #define vmaxq_m(__inactive, __a, __b, __p) __arm_vmaxq_m(__inactive, __a, __b, __p)
249 #define vminq_m(__inactive, __a, __b, __p) __arm_vminq_m(__inactive, __a, __b, __p)
250 #define vmladavaq_p(__a, __b, __c, __p) __arm_vmladavaq_p(__a, __b, __c, __p)
251 #define vmladavaxq_p(__a, __b, __c, __p) __arm_vmladavaxq_p(__a, __b, __c, __p)
252 #define vmlaq_m(__a, __b, __c, __p) __arm_vmlaq_m(__a, __b, __c, __p)
253 #define vmlasq_m(__a, __b, __c, __p) __arm_vmlasq_m(__a, __b, __c, __p)
254 #define vmlsdavaq_p(__a, __b, __c, __p) __arm_vmlsdavaq_p(__a, __b, __c, __p)
255 #define vmlsdavaxq_p(__a, __b, __c, __p) __arm_vmlsdavaxq_p(__a, __b, __c, __p)
256 #define vmulhq_m(__inactive, __a, __b, __p) __arm_vmulhq_m(__inactive, __a, __b, __p)
257 #define vmullbq_int_m(__inactive, __a, __b, __p) __arm_vmullbq_int_m(__inactive, __a, __b, __p)
258 #define vmulltq_int_m(__inactive, __a, __b, __p) __arm_vmulltq_int_m(__inactive, __a, __b, __p)
259 #define vmulq_m(__inactive, __a, __b, __p) __arm_vmulq_m(__inactive, __a, __b, __p)
260 #define vornq_m(__inactive, __a, __b, __p) __arm_vornq_m(__inactive, __a, __b, __p)
261 #define vorrq_m(__inactive, __a, __b, __p) __arm_vorrq_m(__inactive, __a, __b, __p)
262 #define vqaddq_m(__inactive, __a, __b, __p) __arm_vqaddq_m(__inactive, __a, __b, __p)
263 #define vqdmladhq_m(__inactive, __a, __b, __p) __arm_vqdmladhq_m(__inactive, __a, __b, __p)
264 #define vqdmlashq_m(__a, __b, __c, __p) __arm_vqdmlashq_m(__a, __b, __c, __p)
265 #define vqdmladhxq_m(__inactive, __a, __b, __p) __arm_vqdmladhxq_m(__inactive, __a, __b, __p)
266 #define vqdmlahq_m(__a, __b, __c, __p) __arm_vqdmlahq_m(__a, __b, __c, __p)
267 #define vqdmlsdhq_m(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m(__inactive, __a, __b, __p)
268 #define vqdmlsdhxq_m(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m(__inactive, __a, __b, __p)
269 #define vqdmulhq_m(__inactive, __a, __b, __p) __arm_vqdmulhq_m(__inactive, __a, __b, __p)
270 #define vqrdmladhq_m(__inactive, __a, __b, __p) __arm_vqrdmladhq_m(__inactive, __a, __b, __p)
271 #define vqrdmladhxq_m(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m(__inactive, __a, __b, __p)
272 #define vqrdmlahq_m(__a, __b, __c, __p) __arm_vqrdmlahq_m(__a, __b, __c, __p)
273 #define vqrdmlashq_m(__a, __b, __c, __p) __arm_vqrdmlashq_m(__a, __b, __c, __p)
274 #define vqrdmlsdhq_m(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m(__inactive, __a, __b, __p)
275 #define vqrdmlsdhxq_m(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m(__inactive, __a, __b, __p)
276 #define vqrdmulhq_m(__inactive, __a, __b, __p) __arm_vqrdmulhq_m(__inactive, __a, __b, __p)
277 #define vqrshlq_m(__inactive, __a, __b, __p) __arm_vqrshlq_m(__inactive, __a, __b, __p)
278 #define vqshlq_m_n(__inactive, __a, __imm, __p) __arm_vqshlq_m_n(__inactive, __a, __imm, __p)
279 #define vqshlq_m(__inactive, __a, __b, __p) __arm_vqshlq_m(__inactive, __a, __b, __p)
280 #define vqsubq_m(__inactive, __a, __b, __p) __arm_vqsubq_m(__inactive, __a, __b, __p)
281 #define vrhaddq_m(__inactive, __a, __b, __p) __arm_vrhaddq_m(__inactive, __a, __b, __p)
282 #define vrmulhq_m(__inactive, __a, __b, __p) __arm_vrmulhq_m(__inactive, __a, __b, __p)
283 #define vrshlq_m(__inactive, __a, __b, __p) __arm_vrshlq_m(__inactive, __a, __b, __p)
284 #define vrshrq_m(__inactive, __a, __imm, __p) __arm_vrshrq_m(__inactive, __a, __imm, __p)
285 #define vshlq_m_n(__inactive, __a, __imm, __p) __arm_vshlq_m_n(__inactive, __a, __imm, __p)
286 #define vshrq_m(__inactive, __a, __imm, __p) __arm_vshrq_m(__inactive, __a, __imm, __p)
287 #define vsliq_m(__a, __b, __imm, __p) __arm_vsliq_m(__a, __b, __imm, __p)
288 #define vmlaldavaq_p(__a, __b, __c, __p) __arm_vmlaldavaq_p(__a, __b, __c, __p)
289 #define vmlaldavaxq_p(__a, __b, __c, __p) __arm_vmlaldavaxq_p(__a, __b, __c, __p)
290 #define vmlsldavaq_p(__a, __b, __c, __p) __arm_vmlsldavaq_p(__a, __b, __c, __p)
291 #define vmlsldavaxq_p(__a, __b, __c, __p) __arm_vmlsldavaxq_p(__a, __b, __c, __p)
292 #define vmullbq_poly_m(__inactive, __a, __b, __p) __arm_vmullbq_poly_m(__inactive, __a, __b, __p)
293 #define vmulltq_poly_m(__inactive, __a, __b, __p) __arm_vmulltq_poly_m(__inactive, __a, __b, __p)
294 #define vqdmullbq_m(__inactive, __a, __b, __p) __arm_vqdmullbq_m(__inactive, __a, __b, __p)
295 #define vqdmulltq_m(__inactive, __a, __b, __p) __arm_vqdmulltq_m(__inactive, __a, __b, __p)
/* Predicated narrowing shifts and widening shifts.  */
296 #define vqrshrnbq_m(__a, __b, __imm, __p) __arm_vqrshrnbq_m(__a, __b, __imm, __p)
297 #define vqrshrntq_m(__a, __b, __imm, __p) __arm_vqrshrntq_m(__a, __b, __imm, __p)
298 #define vqrshrunbq_m(__a, __b, __imm, __p) __arm_vqrshrunbq_m(__a, __b, __imm, __p)
299 #define vqrshruntq_m(__a, __b, __imm, __p) __arm_vqrshruntq_m(__a, __b, __imm, __p)
300 #define vqshrnbq_m(__a, __b, __imm, __p) __arm_vqshrnbq_m(__a, __b, __imm, __p)
301 #define vqshrntq_m(__a, __b, __imm, __p) __arm_vqshrntq_m(__a, __b, __imm, __p)
302 #define vqshrunbq_m(__a, __b, __imm, __p) __arm_vqshrunbq_m(__a, __b, __imm, __p)
303 #define vqshruntq_m(__a, __b, __imm, __p) __arm_vqshruntq_m(__a, __b, __imm, __p)
304 #define vrmlaldavhaq_p(__a, __b, __c, __p) __arm_vrmlaldavhaq_p(__a, __b, __c, __p)
305 #define vrmlaldavhaxq_p(__a, __b, __c, __p) __arm_vrmlaldavhaxq_p(__a, __b, __c, __p)
306 #define vrmlsldavhaq_p(__a, __b, __c, __p) __arm_vrmlsldavhaq_p(__a, __b, __c, __p)
307 #define vrmlsldavhaxq_p(__a, __b, __c, __p) __arm_vrmlsldavhaxq_p(__a, __b, __c, __p)
308 #define vrshrnbq_m(__a, __b, __imm, __p) __arm_vrshrnbq_m(__a, __b, __imm, __p)
309 #define vrshrntq_m(__a, __b, __imm, __p) __arm_vrshrntq_m(__a, __b, __imm, __p)
310 #define vshllbq_m(__inactive, __a, __imm, __p) __arm_vshllbq_m(__inactive, __a, __imm, __p)
311 #define vshlltq_m(__inactive, __a, __imm, __p) __arm_vshlltq_m(__inactive, __a, __imm, __p)
312 #define vshrnbq_m(__a, __b, __imm, __p) __arm_vshrnbq_m(__a, __b, __imm, __p)
313 #define vshrntq_m(__a, __b, __imm, __p) __arm_vshrntq_m(__a, __b, __imm, __p)
/* Memory intrinsics: contiguous loads/stores (vld1q/vst1q, vldr*/vstr*),
   gather loads and scatter stores (plain, _shifted_offset, _base), their
   _p/_z predicated variants, plus vuninitializedq and the vreinterpretq
   bit-cast family.  Suffix letter selects element size: b=8, h=16,
   w=32, d=64 bits.  */
314 #define vstrbq_scatter_offset(__base, __offset, __value) __arm_vstrbq_scatter_offset(__base, __offset, __value)
315 #define vstrbq(__addr, __value) __arm_vstrbq(__addr, __value)
316 #define vstrwq_scatter_base(__addr, __offset, __value) __arm_vstrwq_scatter_base(__addr, __offset, __value)
317 #define vldrbq_gather_offset(__base, __offset) __arm_vldrbq_gather_offset(__base, __offset)
318 #define vstrbq_p(__addr, __value, __p) __arm_vstrbq_p(__addr, __value, __p)
319 #define vstrbq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p(__base, __offset, __value, __p)
320 #define vstrwq_scatter_base_p(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p(__addr, __offset, __value, __p)
321 #define vldrbq_gather_offset_z(__base, __offset, __p) __arm_vldrbq_gather_offset_z(__base, __offset, __p)
322 #define vld1q(__base) __arm_vld1q(__base)
323 #define vldrhq_gather_offset(__base, __offset) __arm_vldrhq_gather_offset(__base, __offset)
324 #define vldrhq_gather_offset_z(__base, __offset, __p) __arm_vldrhq_gather_offset_z(__base, __offset, __p)
325 #define vldrhq_gather_shifted_offset(__base, __offset) __arm_vldrhq_gather_shifted_offset(__base, __offset)
326 #define vldrhq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z(__base, __offset, __p)
327 #define vldrdq_gather_offset(__base, __offset) __arm_vldrdq_gather_offset(__base, __offset)
328 #define vldrdq_gather_offset_z(__base, __offset, __p) __arm_vldrdq_gather_offset_z(__base, __offset, __p)
329 #define vldrdq_gather_shifted_offset(__base, __offset) __arm_vldrdq_gather_shifted_offset(__base, __offset)
330 #define vldrdq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z(__base, __offset, __p)
331 #define vldrwq_gather_offset(__base, __offset) __arm_vldrwq_gather_offset(__base, __offset)
332 #define vldrwq_gather_offset_z(__base, __offset, __p) __arm_vldrwq_gather_offset_z(__base, __offset, __p)
333 #define vldrwq_gather_shifted_offset(__base, __offset) __arm_vldrwq_gather_shifted_offset(__base, __offset)
334 #define vldrwq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z(__base, __offset, __p)
335 #define vst1q(__addr, __value) __arm_vst1q(__addr, __value)
336 #define vstrhq_scatter_offset(__base, __offset, __value) __arm_vstrhq_scatter_offset(__base, __offset, __value)
337 #define vstrhq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p(__base, __offset, __value, __p)
338 #define vstrhq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrhq_scatter_shifted_offset(__base, __offset, __value)
339 #define vstrhq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p(__base, __offset, __value, __p)
340 #define vstrhq(__addr, __value) __arm_vstrhq(__addr, __value)
341 #define vstrhq_p(__addr, __value, __p) __arm_vstrhq_p(__addr, __value, __p)
342 #define vstrwq(__addr, __value) __arm_vstrwq(__addr, __value)
343 #define vstrwq_p(__addr, __value, __p) __arm_vstrwq_p(__addr, __value, __p)
344 #define vstrdq_scatter_base_p(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p(__addr, __offset, __value, __p)
345 #define vstrdq_scatter_base(__addr, __offset, __value) __arm_vstrdq_scatter_base(__addr, __offset, __value)
346 #define vstrdq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p(__base, __offset, __value, __p)
347 #define vstrdq_scatter_offset(__base, __offset, __value) __arm_vstrdq_scatter_offset(__base, __offset, __value)
348 #define vstrdq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p(__base, __offset, __value, __p)
349 #define vstrdq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset(__base, __offset, __value)
350 #define vstrwq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p(__base, __offset, __value, __p)
351 #define vstrwq_scatter_offset(__base, __offset, __value) __arm_vstrwq_scatter_offset(__base, __offset, __value)
352 #define vstrwq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p(__base, __offset, __value, __p)
353 #define vstrwq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset(__base, __offset, __value)
354 #define vuninitializedq(__v) __arm_vuninitializedq(__v)
/* Reinterpret (bit-cast) a vector to the named element type.  */
355 #define vreinterpretq_s16(__a) __arm_vreinterpretq_s16(__a)
356 #define vreinterpretq_s32(__a) __arm_vreinterpretq_s32(__a)
357 #define vreinterpretq_s64(__a) __arm_vreinterpretq_s64(__a)
358 #define vreinterpretq_s8(__a) __arm_vreinterpretq_s8(__a)
359 #define vreinterpretq_u16(__a) __arm_vreinterpretq_u16(__a)
360 #define vreinterpretq_u32(__a) __arm_vreinterpretq_u32(__a)
361 #define vreinterpretq_u64(__a) __arm_vreinterpretq_u64(__a)
362 #define vreinterpretq_u8(__a) __arm_vreinterpretq_u8(__a)
/* Incrementing/decrementing (and wrapping) vector-generation
   intrinsics: vddupq/vidupq/vdwdupq/viwdupq with explicitly typed
   variants, their _m (merging) and _x ("dont-care" inactive lanes —
   per ACLE naming, confirm) forms; writeback scatter stores; _x forms
   of the arithmetic ops; carry-propagating add/subtract; multi-vector
   loads/stores and lane access.  */
363 #define vddupq_m(__inactive, __a, __imm, __p) __arm_vddupq_m(__inactive, __a, __imm, __p)
364 #define vddupq_u8(__a, __imm) __arm_vddupq_u8(__a, __imm)
365 #define vddupq_u32(__a, __imm) __arm_vddupq_u32(__a, __imm)
366 #define vddupq_u16(__a, __imm) __arm_vddupq_u16(__a, __imm)
367 #define vdwdupq_m(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m(__inactive, __a, __b, __imm, __p)
368 #define vdwdupq_u8(__a, __b, __imm) __arm_vdwdupq_u8(__a, __b, __imm)
369 #define vdwdupq_u32(__a, __b, __imm) __arm_vdwdupq_u32(__a, __b, __imm)
370 #define vdwdupq_u16(__a, __b, __imm) __arm_vdwdupq_u16(__a, __b, __imm)
371 #define vidupq_m(__inactive, __a, __imm, __p) __arm_vidupq_m(__inactive, __a, __imm, __p)
372 #define vidupq_u8(__a, __imm) __arm_vidupq_u8(__a, __imm)
373 #define vidupq_u32(__a, __imm) __arm_vidupq_u32(__a, __imm)
374 #define vidupq_u16(__a, __imm) __arm_vidupq_u16(__a, __imm)
375 #define viwdupq_m(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m(__inactive, __a, __b, __imm, __p)
376 #define viwdupq_u8(__a, __b, __imm) __arm_viwdupq_u8(__a, __b, __imm)
377 #define viwdupq_u32(__a, __b, __imm) __arm_viwdupq_u32(__a, __b, __imm)
378 #define viwdupq_u16(__a, __b, __imm) __arm_viwdupq_u16(__a, __b, __imm)
/* Scatter stores with base-register writeback (_wb).  */
379 #define vstrdq_scatter_base_wb(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb(__addr, __offset, __value)
380 #define vstrdq_scatter_base_wb_p(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p(__addr, __offset, __value, __p)
381 #define vstrwq_scatter_base_wb_p(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p(__addr, __offset, __value, __p)
382 #define vstrwq_scatter_base_wb(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb(__addr, __offset, __value)
383 #define vddupq_x_u8(__a, __imm, __p) __arm_vddupq_x_u8(__a, __imm, __p)
384 #define vddupq_x_u16(__a, __imm, __p) __arm_vddupq_x_u16(__a, __imm, __p)
385 #define vddupq_x_u32(__a, __imm, __p) __arm_vddupq_x_u32(__a, __imm, __p)
386 #define vdwdupq_x_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_u8(__a, __b, __imm, __p)
387 #define vdwdupq_x_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_u16(__a, __b, __imm, __p)
388 #define vdwdupq_x_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_u32(__a, __b, __imm, __p)
389 #define vidupq_x_u8(__a, __imm, __p) __arm_vidupq_x_u8(__a, __imm, __p)
390 #define vidupq_x_u16(__a, __imm, __p) __arm_vidupq_x_u16(__a, __imm, __p)
391 #define vidupq_x_u32(__a, __imm, __p) __arm_vidupq_x_u32(__a, __imm, __p)
392 #define viwdupq_x_u8(__a, __b, __imm, __p) __arm_viwdupq_x_u8(__a, __b, __imm, __p)
393 #define viwdupq_x_u16(__a, __b, __imm, __p) __arm_viwdupq_x_u16(__a, __b, __imm, __p)
394 #define viwdupq_x_u32(__a, __b, __imm, __p) __arm_viwdupq_x_u32(__a, __b, __imm, __p)
/* _x-suffixed predicated forms of the arithmetic/logical operations.  */
395 #define vminq_x(__a, __b, __p) __arm_vminq_x(__a, __b, __p)
396 #define vmaxq_x(__a, __b, __p) __arm_vmaxq_x(__a, __b, __p)
397 #define vabdq_x(__a, __b, __p) __arm_vabdq_x(__a, __b, __p)
398 #define vabsq_x(__a, __p) __arm_vabsq_x(__a, __p)
399 #define vaddq_x(__a, __b, __p) __arm_vaddq_x(__a, __b, __p)
400 #define vclsq_x(__a, __p) __arm_vclsq_x(__a, __p)
401 #define vclzq_x(__a, __p) __arm_vclzq_x(__a, __p)
402 #define vnegq_x(__a, __p) __arm_vnegq_x(__a, __p)
403 #define vmulhq_x(__a, __b, __p) __arm_vmulhq_x(__a, __b, __p)
404 #define vmullbq_poly_x(__a, __b, __p) __arm_vmullbq_poly_x(__a, __b, __p)
405 #define vmullbq_int_x(__a, __b, __p) __arm_vmullbq_int_x(__a, __b, __p)
406 #define vmulltq_poly_x(__a, __b, __p) __arm_vmulltq_poly_x(__a, __b, __p)
407 #define vmulltq_int_x(__a, __b, __p) __arm_vmulltq_int_x(__a, __b, __p)
408 #define vmulq_x(__a, __b, __p) __arm_vmulq_x(__a, __b, __p)
409 #define vsubq_x(__a, __b, __p) __arm_vsubq_x(__a, __b, __p)
410 #define vcaddq_rot90_x(__a, __b, __p) __arm_vcaddq_rot90_x(__a, __b, __p)
411 #define vcaddq_rot270_x(__a, __b, __p) __arm_vcaddq_rot270_x(__a, __b, __p)
412 #define vhaddq_x(__a, __b, __p) __arm_vhaddq_x(__a, __b, __p)
413 #define vhcaddq_rot90_x(__a, __b, __p) __arm_vhcaddq_rot90_x(__a, __b, __p)
414 #define vhcaddq_rot270_x(__a, __b, __p) __arm_vhcaddq_rot270_x(__a, __b, __p)
415 #define vhsubq_x(__a, __b, __p) __arm_vhsubq_x(__a, __b, __p)
416 #define vrhaddq_x(__a, __b, __p) __arm_vrhaddq_x(__a, __b, __p)
417 #define vrmulhq_x(__a, __b, __p) __arm_vrmulhq_x(__a, __b, __p)
418 #define vandq_x(__a, __b, __p) __arm_vandq_x(__a, __b, __p)
419 #define vbicq_x(__a, __b, __p) __arm_vbicq_x(__a, __b, __p)
420 #define vbrsrq_x(__a, __b, __p) __arm_vbrsrq_x(__a, __b, __p)
421 #define veorq_x(__a, __b, __p) __arm_veorq_x(__a, __b, __p)
422 #define vmovlbq_x(__a, __p) __arm_vmovlbq_x(__a, __p)
423 #define vmovltq_x(__a, __p) __arm_vmovltq_x(__a, __p)
424 #define vmvnq_x(__a, __p) __arm_vmvnq_x(__a, __p)
425 #define vornq_x(__a, __b, __p) __arm_vornq_x(__a, __b, __p)
426 #define vorrq_x(__a, __b, __p) __arm_vorrq_x(__a, __b, __p)
427 #define vrev16q_x(__a, __p) __arm_vrev16q_x(__a, __p)
428 #define vrev32q_x(__a, __p) __arm_vrev32q_x(__a, __p)
429 #define vrev64q_x(__a, __p) __arm_vrev64q_x(__a, __p)
430 #define vrshlq_x(__a, __b, __p) __arm_vrshlq_x(__a, __b, __p)
431 #define vshllbq_x(__a, __imm, __p) __arm_vshllbq_x(__a, __imm, __p)
432 #define vshlltq_x(__a, __imm, __p) __arm_vshlltq_x(__a, __imm, __p)
433 #define vshlq_x(__a, __b, __p) __arm_vshlq_x(__a, __b, __p)
434 #define vshlq_x_n(__a, __imm, __p) __arm_vshlq_x_n(__a, __imm, __p)
435 #define vrshrq_x(__a, __imm, __p) __arm_vrshrq_x(__a, __imm, __p)
436 #define vshrq_x(__a, __imm, __p) __arm_vshrq_x(__a, __imm, __p)
/* Carry-propagating add/subtract (__carry / __carry_out is written
   through a pointer by the implementation — see function bodies).  */
437 #define vadciq(__a, __b, __carry_out) __arm_vadciq(__a, __b, __carry_out)
438 #define vadciq_m(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m(__inactive, __a, __b, __carry_out, __p)
439 #define vadcq(__a, __b, __carry) __arm_vadcq(__a, __b, __carry)
440 #define vadcq_m(__inactive, __a, __b, __carry, __p) __arm_vadcq_m(__inactive, __a, __b, __carry, __p)
441 #define vsbciq(__a, __b, __carry_out) __arm_vsbciq(__a, __b, __carry_out)
442 #define vsbciq_m(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m(__inactive, __a, __b, __carry_out, __p)
443 #define vsbcq(__a, __b, __carry) __arm_vsbcq(__a, __b, __carry)
444 #define vsbcq_m(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m(__inactive, __a, __b, __carry, __p)
/* Multi-vector (de)interleaving loads/stores and lane access.  */
445 #define vst1q_p(__addr, __value, __p) __arm_vst1q_p(__addr, __value, __p)
446 #define vst2q(__addr, __value) __arm_vst2q(__addr, __value)
447 #define vld1q_z(__base, __p) __arm_vld1q_z(__base, __p)
448 #define vld2q(__addr) __arm_vld2q(__addr)
449 #define vld4q(__addr) __arm_vld4q(__addr)
450 #define vsetq_lane(__a, __b, __idx) __arm_vsetq_lane(__a, __b, __idx)
451 #define vgetq_lane(__a, __idx) __arm_vgetq_lane(__a, __idx)
452 #define vshlcq_m(__a, __b, __imm, __p) __arm_vshlcq_m(__a, __b, __imm, __p)
453 #define vrndxq(__a) __arm_vrndxq(__a)
454 #define vrndq(__a) __arm_vrndq(__a)
455 #define vrndpq(__a) __arm_vrndpq(__a)
456 #define vrndnq(__a) __arm_vrndnq(__a)
457 #define vrndmq(__a) __arm_vrndmq(__a)
458 #define vrndaq(__a) __arm_vrndaq(__a)
459 #define vcvttq_f32(__a) __arm_vcvttq_f32(__a)
460 #define vcvtbq_f32(__a) __arm_vcvtbq_f32(__a)
461 #define vcvtq(__a) __arm_vcvtq(__a)
462 #define vcvtq_n(__a, __imm6) __arm_vcvtq_n(__a, __imm6)
463 #define vminnmvq(__a, __b) __arm_vminnmvq(__a, __b)
464 #define vminnmq(__a, __b) __arm_vminnmq(__a, __b)
465 #define vminnmavq(__a, __b) __arm_vminnmavq(__a, __b)
466 #define vminnmaq(__a, __b) __arm_vminnmaq(__a, __b)
467 #define vmaxnmvq(__a, __b) __arm_vmaxnmvq(__a, __b)
468 #define vmaxnmq(__a, __b) __arm_vmaxnmq(__a, __b)
469 #define vmaxnmavq(__a, __b) __arm_vmaxnmavq(__a, __b)
470 #define vmaxnmaq(__a, __b) __arm_vmaxnmaq(__a, __b)
471 #define vcmulq_rot90(__a, __b) __arm_vcmulq_rot90(__a, __b)
472 #define vcmulq_rot270(__a, __b) __arm_vcmulq_rot270(__a, __b)
473 #define vcmulq_rot180(__a, __b) __arm_vcmulq_rot180(__a, __b)
474 #define vcmulq(__a, __b) __arm_vcmulq(__a, __b)
475 #define vcvtaq_m(__inactive, __a, __p) __arm_vcvtaq_m(__inactive, __a, __p)
476 #define vcvtq_m(__inactive, __a, __p) __arm_vcvtq_m(__inactive, __a, __p)
477 #define vcvtbq_m(__a, __b, __p) __arm_vcvtbq_m(__a, __b, __p)
478 #define vcvttq_m(__a, __b, __p) __arm_vcvttq_m(__a, __b, __p)
479 #define vcmlaq(__a, __b, __c) __arm_vcmlaq(__a, __b, __c)
480 #define vcmlaq_rot180(__a, __b, __c) __arm_vcmlaq_rot180(__a, __b, __c)
481 #define vcmlaq_rot270(__a, __b, __c) __arm_vcmlaq_rot270(__a, __b, __c)
482 #define vcmlaq_rot90(__a, __b, __c) __arm_vcmlaq_rot90(__a, __b, __c)
483 #define vfmaq(__a, __b, __c) __arm_vfmaq(__a, __b, __c)
484 #define vfmasq(__a, __b, __c) __arm_vfmasq(__a, __b, __c)
485 #define vfmsq(__a, __b, __c) __arm_vfmsq(__a, __b, __c)
486 #define vcvtmq_m(__inactive, __a, __p) __arm_vcvtmq_m(__inactive, __a, __p)
487 #define vcvtnq_m(__inactive, __a, __p) __arm_vcvtnq_m(__inactive, __a, __p)
488 #define vcvtpq_m(__inactive, __a, __p) __arm_vcvtpq_m(__inactive, __a, __p)
489 #define vmaxnmaq_m(__a, __b, __p) __arm_vmaxnmaq_m(__a, __b, __p)
490 #define vmaxnmavq_p(__a, __b, __p) __arm_vmaxnmavq_p(__a, __b, __p)
491 #define vmaxnmvq_p(__a, __b, __p) __arm_vmaxnmvq_p(__a, __b, __p)
492 #define vminnmaq_m(__a, __b, __p) __arm_vminnmaq_m(__a, __b, __p)
493 #define vminnmavq_p(__a, __b, __p) __arm_vminnmavq_p(__a, __b, __p)
494 #define vminnmvq_p(__a, __b, __p) __arm_vminnmvq_p(__a, __b, __p)
495 #define vrndaq_m(__inactive, __a, __p) __arm_vrndaq_m(__inactive, __a, __p)
496 #define vrndmq_m(__inactive, __a, __p) __arm_vrndmq_m(__inactive, __a, __p)
497 #define vrndnq_m(__inactive, __a, __p) __arm_vrndnq_m(__inactive, __a, __p)
498 #define vrndpq_m(__inactive, __a, __p) __arm_vrndpq_m(__inactive, __a, __p)
499 #define vrndq_m(__inactive, __a, __p) __arm_vrndq_m(__inactive, __a, __p)
500 #define vrndxq_m(__inactive, __a, __p) __arm_vrndxq_m(__inactive, __a, __p)
501 #define vcvtq_m_n(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n(__inactive, __a, __imm6, __p)
502 #define vcmlaq_m(__a, __b, __c, __p) __arm_vcmlaq_m(__a, __b, __c, __p)
503 #define vcmlaq_rot180_m(__a, __b, __c, __p) __arm_vcmlaq_rot180_m(__a, __b, __c, __p)
504 #define vcmlaq_rot270_m(__a, __b, __c, __p) __arm_vcmlaq_rot270_m(__a, __b, __c, __p)
505 #define vcmlaq_rot90_m(__a, __b, __c, __p) __arm_vcmlaq_rot90_m(__a, __b, __c, __p)
506 #define vcmulq_m(__inactive, __a, __b, __p) __arm_vcmulq_m(__inactive, __a, __b, __p)
507 #define vcmulq_rot180_m(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m(__inactive, __a, __b, __p)
508 #define vcmulq_rot270_m(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m(__inactive, __a, __b, __p)
509 #define vcmulq_rot90_m(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m(__inactive, __a, __b, __p)
510 #define vfmaq_m(__a, __b, __c, __p) __arm_vfmaq_m(__a, __b, __c, __p)
511 #define vfmasq_m(__a, __b, __c, __p) __arm_vfmasq_m(__a, __b, __c, __p)
512 #define vfmsq_m(__a, __b, __c, __p) __arm_vfmsq_m(__a, __b, __c, __p)
513 #define vmaxnmq_m(__inactive, __a, __b, __p) __arm_vmaxnmq_m(__inactive, __a, __b, __p)
514 #define vminnmq_m(__inactive, __a, __b, __p) __arm_vminnmq_m(__inactive, __a, __b, __p)
515 #define vreinterpretq_f16(__a) __arm_vreinterpretq_f16(__a)
516 #define vreinterpretq_f32(__a) __arm_vreinterpretq_f32(__a)
517 #define vminnmq_x(__a, __b, __p) __arm_vminnmq_x(__a, __b, __p)
518 #define vmaxnmq_x(__a, __b, __p) __arm_vmaxnmq_x(__a, __b, __p)
519 #define vcmulq_x(__a, __b, __p) __arm_vcmulq_x(__a, __b, __p)
520 #define vcmulq_rot90_x(__a, __b, __p) __arm_vcmulq_rot90_x(__a, __b, __p)
521 #define vcmulq_rot180_x(__a, __b, __p) __arm_vcmulq_rot180_x(__a, __b, __p)
522 #define vcmulq_rot270_x(__a, __b, __p) __arm_vcmulq_rot270_x(__a, __b, __p)
523 #define vcvtq_x(__a, __p) __arm_vcvtq_x(__a, __p)
524 #define vcvtq_x_n(__a, __imm6, __p) __arm_vcvtq_x_n(__a, __imm6, __p)
525 #define vrndq_x(__a, __p) __arm_vrndq_x(__a, __p)
526 #define vrndnq_x(__a, __p) __arm_vrndnq_x(__a, __p)
527 #define vrndmq_x(__a, __p) __arm_vrndmq_x(__a, __p)
528 #define vrndpq_x(__a, __p) __arm_vrndpq_x(__a, __p)
529 #define vrndaq_x(__a, __p) __arm_vrndaq_x(__a, __p)
530 #define vrndxq_x(__a, __p) __arm_vrndxq_x(__a, __p)
531
532
533 #define vst4q_s8( __addr, __value) __arm_vst4q_s8( __addr, __value)
534 #define vst4q_s16( __addr, __value) __arm_vst4q_s16( __addr, __value)
535 #define vst4q_s32( __addr, __value) __arm_vst4q_s32( __addr, __value)
536 #define vst4q_u8( __addr, __value) __arm_vst4q_u8( __addr, __value)
537 #define vst4q_u16( __addr, __value) __arm_vst4q_u16( __addr, __value)
538 #define vst4q_u32( __addr, __value) __arm_vst4q_u32( __addr, __value)
539 #define vst4q_f16( __addr, __value) __arm_vst4q_f16( __addr, __value)
540 #define vst4q_f32( __addr, __value) __arm_vst4q_f32( __addr, __value)
541 #define vrndxq_f16(__a) __arm_vrndxq_f16(__a)
542 #define vrndxq_f32(__a) __arm_vrndxq_f32(__a)
543 #define vrndq_f16(__a) __arm_vrndq_f16(__a)
544 #define vrndq_f32(__a) __arm_vrndq_f32(__a)
545 #define vrndpq_f16(__a) __arm_vrndpq_f16(__a)
546 #define vrndpq_f32(__a) __arm_vrndpq_f32(__a)
547 #define vrndnq_f16(__a) __arm_vrndnq_f16(__a)
548 #define vrndnq_f32(__a) __arm_vrndnq_f32(__a)
549 #define vrndmq_f16(__a) __arm_vrndmq_f16(__a)
550 #define vrndmq_f32(__a) __arm_vrndmq_f32(__a)
551 #define vrndaq_f16(__a) __arm_vrndaq_f16(__a)
552 #define vrndaq_f32(__a) __arm_vrndaq_f32(__a)
553 #define vrev64q_f16(__a) __arm_vrev64q_f16(__a)
554 #define vrev64q_f32(__a) __arm_vrev64q_f32(__a)
555 #define vnegq_f16(__a) __arm_vnegq_f16(__a)
556 #define vnegq_f32(__a) __arm_vnegq_f32(__a)
557 #define vdupq_n_f16(__a) __arm_vdupq_n_f16(__a)
558 #define vdupq_n_f32(__a) __arm_vdupq_n_f32(__a)
559 #define vabsq_f16(__a) __arm_vabsq_f16(__a)
560 #define vabsq_f32(__a) __arm_vabsq_f32(__a)
561 #define vrev32q_f16(__a) __arm_vrev32q_f16(__a)
562 #define vcvttq_f32_f16(__a) __arm_vcvttq_f32_f16(__a)
563 #define vcvtbq_f32_f16(__a) __arm_vcvtbq_f32_f16(__a)
564 #define vcvtq_f16_s16(__a) __arm_vcvtq_f16_s16(__a)
565 #define vcvtq_f32_s32(__a) __arm_vcvtq_f32_s32(__a)
566 #define vcvtq_f16_u16(__a) __arm_vcvtq_f16_u16(__a)
567 #define vcvtq_f32_u32(__a) __arm_vcvtq_f32_u32(__a)
568 #define vdupq_n_s8(__a) __arm_vdupq_n_s8(__a)
569 #define vdupq_n_s16(__a) __arm_vdupq_n_s16(__a)
570 #define vdupq_n_s32(__a) __arm_vdupq_n_s32(__a)
571 #define vabsq_s8(__a) __arm_vabsq_s8(__a)
572 #define vabsq_s16(__a) __arm_vabsq_s16(__a)
573 #define vabsq_s32(__a) __arm_vabsq_s32(__a)
574 #define vclsq_s8(__a) __arm_vclsq_s8(__a)
575 #define vclsq_s16(__a) __arm_vclsq_s16(__a)
576 #define vclsq_s32(__a) __arm_vclsq_s32(__a)
577 #define vclzq_s8(__a) __arm_vclzq_s8(__a)
578 #define vclzq_s16(__a) __arm_vclzq_s16(__a)
579 #define vclzq_s32(__a) __arm_vclzq_s32(__a)
580 #define vnegq_s8(__a) __arm_vnegq_s8(__a)
581 #define vnegq_s16(__a) __arm_vnegq_s16(__a)
582 #define vnegq_s32(__a) __arm_vnegq_s32(__a)
583 #define vaddlvq_s32(__a) __arm_vaddlvq_s32(__a)
584 #define vaddvq_s8(__a) __arm_vaddvq_s8(__a)
585 #define vaddvq_s16(__a) __arm_vaddvq_s16(__a)
586 #define vaddvq_s32(__a) __arm_vaddvq_s32(__a)
587 #define vmovlbq_s8(__a) __arm_vmovlbq_s8(__a)
588 #define vmovlbq_s16(__a) __arm_vmovlbq_s16(__a)
589 #define vmovltq_s8(__a) __arm_vmovltq_s8(__a)
590 #define vmovltq_s16(__a) __arm_vmovltq_s16(__a)
591 #define vmvnq_s8(__a) __arm_vmvnq_s8(__a)
592 #define vmvnq_s16(__a) __arm_vmvnq_s16(__a)
593 #define vmvnq_s32(__a) __arm_vmvnq_s32(__a)
594 #define vmvnq_n_s16( __imm) __arm_vmvnq_n_s16( __imm)
595 #define vmvnq_n_s32( __imm) __arm_vmvnq_n_s32( __imm)
596 #define vrev16q_s8(__a) __arm_vrev16q_s8(__a)
597 #define vrev32q_s8(__a) __arm_vrev32q_s8(__a)
598 #define vrev32q_s16(__a) __arm_vrev32q_s16(__a)
599 #define vrev64q_s8(__a) __arm_vrev64q_s8(__a)
600 #define vrev64q_s16(__a) __arm_vrev64q_s16(__a)
601 #define vrev64q_s32(__a) __arm_vrev64q_s32(__a)
602 #define vqabsq_s8(__a) __arm_vqabsq_s8(__a)
603 #define vqabsq_s16(__a) __arm_vqabsq_s16(__a)
604 #define vqabsq_s32(__a) __arm_vqabsq_s32(__a)
605 #define vqnegq_s8(__a) __arm_vqnegq_s8(__a)
606 #define vqnegq_s16(__a) __arm_vqnegq_s16(__a)
607 #define vqnegq_s32(__a) __arm_vqnegq_s32(__a)
608 #define vcvtaq_s16_f16(__a) __arm_vcvtaq_s16_f16(__a)
609 #define vcvtaq_s32_f32(__a) __arm_vcvtaq_s32_f32(__a)
610 #define vcvtnq_s16_f16(__a) __arm_vcvtnq_s16_f16(__a)
611 #define vcvtnq_s32_f32(__a) __arm_vcvtnq_s32_f32(__a)
612 #define vcvtpq_s16_f16(__a) __arm_vcvtpq_s16_f16(__a)
613 #define vcvtpq_s32_f32(__a) __arm_vcvtpq_s32_f32(__a)
614 #define vcvtmq_s16_f16(__a) __arm_vcvtmq_s16_f16(__a)
615 #define vcvtmq_s32_f32(__a) __arm_vcvtmq_s32_f32(__a)
616 #define vcvtq_s16_f16(__a) __arm_vcvtq_s16_f16(__a)
617 #define vcvtq_s32_f32(__a) __arm_vcvtq_s32_f32(__a)
618 #define vrev64q_u8(__a) __arm_vrev64q_u8(__a)
619 #define vrev64q_u16(__a) __arm_vrev64q_u16(__a)
620 #define vrev64q_u32(__a) __arm_vrev64q_u32(__a)
621 #define vmvnq_u8(__a) __arm_vmvnq_u8(__a)
622 #define vmvnq_u16(__a) __arm_vmvnq_u16(__a)
623 #define vmvnq_u32(__a) __arm_vmvnq_u32(__a)
624 #define vdupq_n_u8(__a) __arm_vdupq_n_u8(__a)
625 #define vdupq_n_u16(__a) __arm_vdupq_n_u16(__a)
626 #define vdupq_n_u32(__a) __arm_vdupq_n_u32(__a)
627 #define vclzq_u8(__a) __arm_vclzq_u8(__a)
628 #define vclzq_u16(__a) __arm_vclzq_u16(__a)
629 #define vclzq_u32(__a) __arm_vclzq_u32(__a)
630 #define vaddvq_u8(__a) __arm_vaddvq_u8(__a)
631 #define vaddvq_u16(__a) __arm_vaddvq_u16(__a)
632 #define vaddvq_u32(__a) __arm_vaddvq_u32(__a)
633 #define vrev32q_u8(__a) __arm_vrev32q_u8(__a)
634 #define vrev32q_u16(__a) __arm_vrev32q_u16(__a)
635 #define vmovltq_u8(__a) __arm_vmovltq_u8(__a)
636 #define vmovltq_u16(__a) __arm_vmovltq_u16(__a)
637 #define vmovlbq_u8(__a) __arm_vmovlbq_u8(__a)
638 #define vmovlbq_u16(__a) __arm_vmovlbq_u16(__a)
639 #define vmvnq_n_u16( __imm) __arm_vmvnq_n_u16( __imm)
640 #define vmvnq_n_u32( __imm) __arm_vmvnq_n_u32( __imm)
641 #define vrev16q_u8(__a) __arm_vrev16q_u8(__a)
642 #define vaddlvq_u32(__a) __arm_vaddlvq_u32(__a)
643 #define vcvtq_u16_f16(__a) __arm_vcvtq_u16_f16(__a)
644 #define vcvtq_u32_f32(__a) __arm_vcvtq_u32_f32(__a)
645 #define vcvtpq_u16_f16(__a) __arm_vcvtpq_u16_f16(__a)
646 #define vcvtpq_u32_f32(__a) __arm_vcvtpq_u32_f32(__a)
647 #define vcvtnq_u16_f16(__a) __arm_vcvtnq_u16_f16(__a)
648 #define vcvtnq_u32_f32(__a) __arm_vcvtnq_u32_f32(__a)
649 #define vcvtmq_u16_f16(__a) __arm_vcvtmq_u16_f16(__a)
650 #define vcvtmq_u32_f32(__a) __arm_vcvtmq_u32_f32(__a)
651 #define vcvtaq_u16_f16(__a) __arm_vcvtaq_u16_f16(__a)
652 #define vcvtaq_u32_f32(__a) __arm_vcvtaq_u32_f32(__a)
653 #define vctp16q(__a) __arm_vctp16q(__a)
654 #define vctp32q(__a) __arm_vctp32q(__a)
655 #define vctp64q(__a) __arm_vctp64q(__a)
656 #define vctp8q(__a) __arm_vctp8q(__a)
657 #define vpnot(__a) __arm_vpnot(__a)
658 #define vsubq_n_f16(__a, __b) __arm_vsubq_n_f16(__a, __b)
659 #define vsubq_n_f32(__a, __b) __arm_vsubq_n_f32(__a, __b)
660 #define vbrsrq_n_f16(__a, __b) __arm_vbrsrq_n_f16(__a, __b)
661 #define vbrsrq_n_f32(__a, __b) __arm_vbrsrq_n_f32(__a, __b)
662 #define vcvtq_n_f16_s16(__a, __imm6) __arm_vcvtq_n_f16_s16(__a, __imm6)
663 #define vcvtq_n_f32_s32(__a, __imm6) __arm_vcvtq_n_f32_s32(__a, __imm6)
664 #define vcvtq_n_f16_u16(__a, __imm6) __arm_vcvtq_n_f16_u16(__a, __imm6)
665 #define vcvtq_n_f32_u32(__a, __imm6) __arm_vcvtq_n_f32_u32(__a, __imm6)
666 #define vcreateq_f16(__a, __b) __arm_vcreateq_f16(__a, __b)
667 #define vcreateq_f32(__a, __b) __arm_vcreateq_f32(__a, __b)
668 #define vcvtq_n_s16_f16(__a, __imm6) __arm_vcvtq_n_s16_f16(__a, __imm6)
669 #define vcvtq_n_s32_f32(__a, __imm6) __arm_vcvtq_n_s32_f32(__a, __imm6)
670 #define vcvtq_n_u16_f16(__a, __imm6) __arm_vcvtq_n_u16_f16(__a, __imm6)
671 #define vcvtq_n_u32_f32(__a, __imm6) __arm_vcvtq_n_u32_f32(__a, __imm6)
672 #define vcreateq_u8(__a, __b) __arm_vcreateq_u8(__a, __b)
673 #define vcreateq_u16(__a, __b) __arm_vcreateq_u16(__a, __b)
674 #define vcreateq_u32(__a, __b) __arm_vcreateq_u32(__a, __b)
675 #define vcreateq_u64(__a, __b) __arm_vcreateq_u64(__a, __b)
676 #define vcreateq_s8(__a, __b) __arm_vcreateq_s8(__a, __b)
677 #define vcreateq_s16(__a, __b) __arm_vcreateq_s16(__a, __b)
678 #define vcreateq_s32(__a, __b) __arm_vcreateq_s32(__a, __b)
679 #define vcreateq_s64(__a, __b) __arm_vcreateq_s64(__a, __b)
680 #define vshrq_n_s8(__a, __imm) __arm_vshrq_n_s8(__a, __imm)
681 #define vshrq_n_s16(__a, __imm) __arm_vshrq_n_s16(__a, __imm)
682 #define vshrq_n_s32(__a, __imm) __arm_vshrq_n_s32(__a, __imm)
683 #define vshrq_n_u8(__a, __imm) __arm_vshrq_n_u8(__a, __imm)
684 #define vshrq_n_u16(__a, __imm) __arm_vshrq_n_u16(__a, __imm)
685 #define vshrq_n_u32(__a, __imm) __arm_vshrq_n_u32(__a, __imm)
686 #define vaddlvq_p_s32(__a, __p) __arm_vaddlvq_p_s32(__a, __p)
687 #define vaddlvq_p_u32(__a, __p) __arm_vaddlvq_p_u32(__a, __p)
688 #define vcmpneq_s8(__a, __b) __arm_vcmpneq_s8(__a, __b)
689 #define vcmpneq_s16(__a, __b) __arm_vcmpneq_s16(__a, __b)
690 #define vcmpneq_s32(__a, __b) __arm_vcmpneq_s32(__a, __b)
691 #define vcmpneq_u8(__a, __b) __arm_vcmpneq_u8(__a, __b)
692 #define vcmpneq_u16(__a, __b) __arm_vcmpneq_u16(__a, __b)
693 #define vcmpneq_u32(__a, __b) __arm_vcmpneq_u32(__a, __b)
694 #define vshlq_s8(__a, __b) __arm_vshlq_s8(__a, __b)
695 #define vshlq_s16(__a, __b) __arm_vshlq_s16(__a, __b)
696 #define vshlq_s32(__a, __b) __arm_vshlq_s32(__a, __b)
697 #define vshlq_u8(__a, __b) __arm_vshlq_u8(__a, __b)
698 #define vshlq_u16(__a, __b) __arm_vshlq_u16(__a, __b)
699 #define vshlq_u32(__a, __b) __arm_vshlq_u32(__a, __b)
700 #define vsubq_u8(__a, __b) __arm_vsubq_u8(__a, __b)
701 #define vsubq_n_u8(__a, __b) __arm_vsubq_n_u8(__a, __b)
702 #define vrmulhq_u8(__a, __b) __arm_vrmulhq_u8(__a, __b)
703 #define vrhaddq_u8(__a, __b) __arm_vrhaddq_u8(__a, __b)
704 #define vqsubq_u8(__a, __b) __arm_vqsubq_u8(__a, __b)
705 #define vqsubq_n_u8(__a, __b) __arm_vqsubq_n_u8(__a, __b)
706 #define vqaddq_u8(__a, __b) __arm_vqaddq_u8(__a, __b)
707 #define vqaddq_n_u8(__a, __b) __arm_vqaddq_n_u8(__a, __b)
708 #define vorrq_u8(__a, __b) __arm_vorrq_u8(__a, __b)
709 #define vornq_u8(__a, __b) __arm_vornq_u8(__a, __b)
710 #define vmulq_u8(__a, __b) __arm_vmulq_u8(__a, __b)
711 #define vmulq_n_u8(__a, __b) __arm_vmulq_n_u8(__a, __b)
712 #define vmulltq_int_u8(__a, __b) __arm_vmulltq_int_u8(__a, __b)
713 #define vmullbq_int_u8(__a, __b) __arm_vmullbq_int_u8(__a, __b)
714 #define vmulhq_u8(__a, __b) __arm_vmulhq_u8(__a, __b)
715 #define vmladavq_u8(__a, __b) __arm_vmladavq_u8(__a, __b)
716 #define vminvq_u8(__a, __b) __arm_vminvq_u8(__a, __b)
717 #define vminq_u8(__a, __b) __arm_vminq_u8(__a, __b)
718 #define vmaxvq_u8(__a, __b) __arm_vmaxvq_u8(__a, __b)
719 #define vmaxq_u8(__a, __b) __arm_vmaxq_u8(__a, __b)
720 #define vhsubq_u8(__a, __b) __arm_vhsubq_u8(__a, __b)
721 #define vhsubq_n_u8(__a, __b) __arm_vhsubq_n_u8(__a, __b)
722 #define vhaddq_u8(__a, __b) __arm_vhaddq_u8(__a, __b)
723 #define vhaddq_n_u8(__a, __b) __arm_vhaddq_n_u8(__a, __b)
724 #define veorq_u8(__a, __b) __arm_veorq_u8(__a, __b)
725 #define vcmpneq_n_u8(__a, __b) __arm_vcmpneq_n_u8(__a, __b)
726 #define vcmphiq_u8(__a, __b) __arm_vcmphiq_u8(__a, __b)
727 #define vcmphiq_n_u8(__a, __b) __arm_vcmphiq_n_u8(__a, __b)
728 #define vcmpeqq_u8(__a, __b) __arm_vcmpeqq_u8(__a, __b)
729 #define vcmpeqq_n_u8(__a, __b) __arm_vcmpeqq_n_u8(__a, __b)
730 #define vcmpcsq_u8(__a, __b) __arm_vcmpcsq_u8(__a, __b)
731 #define vcmpcsq_n_u8(__a, __b) __arm_vcmpcsq_n_u8(__a, __b)
732 #define vcaddq_rot90_u8(__a, __b) __arm_vcaddq_rot90_u8(__a, __b)
733 #define vcaddq_rot270_u8(__a, __b) __arm_vcaddq_rot270_u8(__a, __b)
734 #define vbicq_u8(__a, __b) __arm_vbicq_u8(__a, __b)
735 #define vandq_u8(__a, __b) __arm_vandq_u8(__a, __b)
736 #define vaddvq_p_u8(__a, __p) __arm_vaddvq_p_u8(__a, __p)
737 #define vaddvaq_u8(__a, __b) __arm_vaddvaq_u8(__a, __b)
738 #define vaddq_n_u8(__a, __b) __arm_vaddq_n_u8(__a, __b)
739 #define vabdq_u8(__a, __b) __arm_vabdq_u8(__a, __b)
740 #define vshlq_r_u8(__a, __b) __arm_vshlq_r_u8(__a, __b)
741 #define vrshlq_u8(__a, __b) __arm_vrshlq_u8(__a, __b)
742 #define vrshlq_n_u8(__a, __b) __arm_vrshlq_n_u8(__a, __b)
743 #define vqshlq_u8(__a, __b) __arm_vqshlq_u8(__a, __b)
744 #define vqshlq_r_u8(__a, __b) __arm_vqshlq_r_u8(__a, __b)
745 #define vqrshlq_u8(__a, __b) __arm_vqrshlq_u8(__a, __b)
746 #define vqrshlq_n_u8(__a, __b) __arm_vqrshlq_n_u8(__a, __b)
747 #define vminavq_s8(__a, __b) __arm_vminavq_s8(__a, __b)
748 #define vminaq_s8(__a, __b) __arm_vminaq_s8(__a, __b)
749 #define vmaxavq_s8(__a, __b) __arm_vmaxavq_s8(__a, __b)
750 #define vmaxaq_s8(__a, __b) __arm_vmaxaq_s8(__a, __b)
751 #define vbrsrq_n_u8(__a, __b) __arm_vbrsrq_n_u8(__a, __b)
752 #define vshlq_n_u8(__a, __imm) __arm_vshlq_n_u8(__a, __imm)
753 #define vrshrq_n_u8(__a, __imm) __arm_vrshrq_n_u8(__a, __imm)
754 #define vqshlq_n_u8(__a, __imm) __arm_vqshlq_n_u8(__a, __imm)
755 #define vcmpneq_n_s8(__a, __b) __arm_vcmpneq_n_s8(__a, __b)
756 #define vcmpltq_s8(__a, __b) __arm_vcmpltq_s8(__a, __b)
757 #define vcmpltq_n_s8(__a, __b) __arm_vcmpltq_n_s8(__a, __b)
758 #define vcmpleq_s8(__a, __b) __arm_vcmpleq_s8(__a, __b)
759 #define vcmpleq_n_s8(__a, __b) __arm_vcmpleq_n_s8(__a, __b)
760 #define vcmpgtq_s8(__a, __b) __arm_vcmpgtq_s8(__a, __b)
761 #define vcmpgtq_n_s8(__a, __b) __arm_vcmpgtq_n_s8(__a, __b)
762 #define vcmpgeq_s8(__a, __b) __arm_vcmpgeq_s8(__a, __b)
763 #define vcmpgeq_n_s8(__a, __b) __arm_vcmpgeq_n_s8(__a, __b)
764 #define vcmpeqq_s8(__a, __b) __arm_vcmpeqq_s8(__a, __b)
765 #define vcmpeqq_n_s8(__a, __b) __arm_vcmpeqq_n_s8(__a, __b)
766 #define vqshluq_n_s8(__a, __imm) __arm_vqshluq_n_s8(__a, __imm)
767 #define vaddvq_p_s8(__a, __p) __arm_vaddvq_p_s8(__a, __p)
768 #define vsubq_s8(__a, __b) __arm_vsubq_s8(__a, __b)
769 #define vsubq_n_s8(__a, __b) __arm_vsubq_n_s8(__a, __b)
770 #define vshlq_r_s8(__a, __b) __arm_vshlq_r_s8(__a, __b)
771 #define vrshlq_s8(__a, __b) __arm_vrshlq_s8(__a, __b)
772 #define vrshlq_n_s8(__a, __b) __arm_vrshlq_n_s8(__a, __b)
773 #define vrmulhq_s8(__a, __b) __arm_vrmulhq_s8(__a, __b)
774 #define vrhaddq_s8(__a, __b) __arm_vrhaddq_s8(__a, __b)
775 #define vqsubq_s8(__a, __b) __arm_vqsubq_s8(__a, __b)
776 #define vqsubq_n_s8(__a, __b) __arm_vqsubq_n_s8(__a, __b)
777 #define vqshlq_s8(__a, __b) __arm_vqshlq_s8(__a, __b)
778 #define vqshlq_r_s8(__a, __b) __arm_vqshlq_r_s8(__a, __b)
779 #define vqrshlq_s8(__a, __b) __arm_vqrshlq_s8(__a, __b)
780 #define vqrshlq_n_s8(__a, __b) __arm_vqrshlq_n_s8(__a, __b)
781 #define vqrdmulhq_s8(__a, __b) __arm_vqrdmulhq_s8(__a, __b)
782 #define vqrdmulhq_n_s8(__a, __b) __arm_vqrdmulhq_n_s8(__a, __b)
783 #define vqdmulhq_s8(__a, __b) __arm_vqdmulhq_s8(__a, __b)
784 #define vqdmulhq_n_s8(__a, __b) __arm_vqdmulhq_n_s8(__a, __b)
785 #define vqaddq_s8(__a, __b) __arm_vqaddq_s8(__a, __b)
786 #define vqaddq_n_s8(__a, __b) __arm_vqaddq_n_s8(__a, __b)
787 #define vorrq_s8(__a, __b) __arm_vorrq_s8(__a, __b)
788 #define vornq_s8(__a, __b) __arm_vornq_s8(__a, __b)
789 #define vmulq_s8(__a, __b) __arm_vmulq_s8(__a, __b)
790 #define vmulq_n_s8(__a, __b) __arm_vmulq_n_s8(__a, __b)
791 #define vmulltq_int_s8(__a, __b) __arm_vmulltq_int_s8(__a, __b)
792 #define vmullbq_int_s8(__a, __b) __arm_vmullbq_int_s8(__a, __b)
793 #define vmulhq_s8(__a, __b) __arm_vmulhq_s8(__a, __b)
794 #define vmlsdavxq_s8(__a, __b) __arm_vmlsdavxq_s8(__a, __b)
795 #define vmlsdavq_s8(__a, __b) __arm_vmlsdavq_s8(__a, __b)
796 #define vmladavxq_s8(__a, __b) __arm_vmladavxq_s8(__a, __b)
797 #define vmladavq_s8(__a, __b) __arm_vmladavq_s8(__a, __b)
798 #define vminvq_s8(__a, __b) __arm_vminvq_s8(__a, __b)
799 #define vminq_s8(__a, __b) __arm_vminq_s8(__a, __b)
800 #define vmaxvq_s8(__a, __b) __arm_vmaxvq_s8(__a, __b)
801 #define vmaxq_s8(__a, __b) __arm_vmaxq_s8(__a, __b)
802 #define vhsubq_s8(__a, __b) __arm_vhsubq_s8(__a, __b)
803 #define vhsubq_n_s8(__a, __b) __arm_vhsubq_n_s8(__a, __b)
804 #define vhcaddq_rot90_s8(__a, __b) __arm_vhcaddq_rot90_s8(__a, __b)
805 #define vhcaddq_rot270_s8(__a, __b) __arm_vhcaddq_rot270_s8(__a, __b)
806 #define vhaddq_s8(__a, __b) __arm_vhaddq_s8(__a, __b)
807 #define vhaddq_n_s8(__a, __b) __arm_vhaddq_n_s8(__a, __b)
808 #define veorq_s8(__a, __b) __arm_veorq_s8(__a, __b)
809 #define vcaddq_rot90_s8(__a, __b) __arm_vcaddq_rot90_s8(__a, __b)
810 #define vcaddq_rot270_s8(__a, __b) __arm_vcaddq_rot270_s8(__a, __b)
811 #define vbrsrq_n_s8(__a, __b) __arm_vbrsrq_n_s8(__a, __b)
812 #define vbicq_s8(__a, __b) __arm_vbicq_s8(__a, __b)
813 #define vandq_s8(__a, __b) __arm_vandq_s8(__a, __b)
814 #define vaddvaq_s8(__a, __b) __arm_vaddvaq_s8(__a, __b)
815 #define vaddq_n_s8(__a, __b) __arm_vaddq_n_s8(__a, __b)
816 #define vabdq_s8(__a, __b) __arm_vabdq_s8(__a, __b)
817 #define vshlq_n_s8(__a, __imm) __arm_vshlq_n_s8(__a, __imm)
818 #define vrshrq_n_s8(__a, __imm) __arm_vrshrq_n_s8(__a, __imm)
819 #define vqshlq_n_s8(__a, __imm) __arm_vqshlq_n_s8(__a, __imm)
820 #define vsubq_u16(__a, __b) __arm_vsubq_u16(__a, __b)
821 #define vsubq_n_u16(__a, __b) __arm_vsubq_n_u16(__a, __b)
822 #define vrmulhq_u16(__a, __b) __arm_vrmulhq_u16(__a, __b)
823 #define vrhaddq_u16(__a, __b) __arm_vrhaddq_u16(__a, __b)
824 #define vqsubq_u16(__a, __b) __arm_vqsubq_u16(__a, __b)
825 #define vqsubq_n_u16(__a, __b) __arm_vqsubq_n_u16(__a, __b)
826 #define vqaddq_u16(__a, __b) __arm_vqaddq_u16(__a, __b)
827 #define vqaddq_n_u16(__a, __b) __arm_vqaddq_n_u16(__a, __b)
828 #define vorrq_u16(__a, __b) __arm_vorrq_u16(__a, __b)
829 #define vornq_u16(__a, __b) __arm_vornq_u16(__a, __b)
830 #define vmulq_u16(__a, __b) __arm_vmulq_u16(__a, __b)
831 #define vmulq_n_u16(__a, __b) __arm_vmulq_n_u16(__a, __b)
832 #define vmulltq_int_u16(__a, __b) __arm_vmulltq_int_u16(__a, __b)
833 #define vmullbq_int_u16(__a, __b) __arm_vmullbq_int_u16(__a, __b)
834 #define vmulhq_u16(__a, __b) __arm_vmulhq_u16(__a, __b)
835 #define vmladavq_u16(__a, __b) __arm_vmladavq_u16(__a, __b)
836 #define vminvq_u16(__a, __b) __arm_vminvq_u16(__a, __b)
837 #define vminq_u16(__a, __b) __arm_vminq_u16(__a, __b)
838 #define vmaxvq_u16(__a, __b) __arm_vmaxvq_u16(__a, __b)
839 #define vmaxq_u16(__a, __b) __arm_vmaxq_u16(__a, __b)
840 #define vhsubq_u16(__a, __b) __arm_vhsubq_u16(__a, __b)
841 #define vhsubq_n_u16(__a, __b) __arm_vhsubq_n_u16(__a, __b)
842 #define vhaddq_u16(__a, __b) __arm_vhaddq_u16(__a, __b)
843 #define vhaddq_n_u16(__a, __b) __arm_vhaddq_n_u16(__a, __b)
844 #define veorq_u16(__a, __b) __arm_veorq_u16(__a, __b)
845 #define vcmpneq_n_u16(__a, __b) __arm_vcmpneq_n_u16(__a, __b)
846 #define vcmphiq_u16(__a, __b) __arm_vcmphiq_u16(__a, __b)
847 #define vcmphiq_n_u16(__a, __b) __arm_vcmphiq_n_u16(__a, __b)
848 #define vcmpeqq_u16(__a, __b) __arm_vcmpeqq_u16(__a, __b)
849 #define vcmpeqq_n_u16(__a, __b) __arm_vcmpeqq_n_u16(__a, __b)
850 #define vcmpcsq_u16(__a, __b) __arm_vcmpcsq_u16(__a, __b)
851 #define vcmpcsq_n_u16(__a, __b) __arm_vcmpcsq_n_u16(__a, __b)
852 #define vcaddq_rot90_u16(__a, __b) __arm_vcaddq_rot90_u16(__a, __b)
853 #define vcaddq_rot270_u16(__a, __b) __arm_vcaddq_rot270_u16(__a, __b)
854 #define vbicq_u16(__a, __b) __arm_vbicq_u16(__a, __b)
855 #define vandq_u16(__a, __b) __arm_vandq_u16(__a, __b)
856 #define vaddvq_p_u16(__a, __p) __arm_vaddvq_p_u16(__a, __p)
857 #define vaddvaq_u16(__a, __b) __arm_vaddvaq_u16(__a, __b)
858 #define vaddq_n_u16(__a, __b) __arm_vaddq_n_u16(__a, __b)
859 #define vabdq_u16(__a, __b) __arm_vabdq_u16(__a, __b)
860 #define vshlq_r_u16(__a, __b) __arm_vshlq_r_u16(__a, __b)
861 #define vrshlq_u16(__a, __b) __arm_vrshlq_u16(__a, __b)
862 #define vrshlq_n_u16(__a, __b) __arm_vrshlq_n_u16(__a, __b)
863 #define vqshlq_u16(__a, __b) __arm_vqshlq_u16(__a, __b)
864 #define vqshlq_r_u16(__a, __b) __arm_vqshlq_r_u16(__a, __b)
865 #define vqrshlq_u16(__a, __b) __arm_vqrshlq_u16(__a, __b)
866 #define vqrshlq_n_u16(__a, __b) __arm_vqrshlq_n_u16(__a, __b)
867 #define vminavq_s16(__a, __b) __arm_vminavq_s16(__a, __b)
868 #define vminaq_s16(__a, __b) __arm_vminaq_s16(__a, __b)
869 #define vmaxavq_s16(__a, __b) __arm_vmaxavq_s16(__a, __b)
870 #define vmaxaq_s16(__a, __b) __arm_vmaxaq_s16(__a, __b)
871 #define vbrsrq_n_u16(__a, __b) __arm_vbrsrq_n_u16(__a, __b)
872 #define vshlq_n_u16(__a, __imm) __arm_vshlq_n_u16(__a, __imm)
873 #define vrshrq_n_u16(__a, __imm) __arm_vrshrq_n_u16(__a, __imm)
874 #define vqshlq_n_u16(__a, __imm) __arm_vqshlq_n_u16(__a, __imm)
875 #define vcmpneq_n_s16(__a, __b) __arm_vcmpneq_n_s16(__a, __b)
876 #define vcmpltq_s16(__a, __b) __arm_vcmpltq_s16(__a, __b)
877 #define vcmpltq_n_s16(__a, __b) __arm_vcmpltq_n_s16(__a, __b)
878 #define vcmpleq_s16(__a, __b) __arm_vcmpleq_s16(__a, __b)
879 #define vcmpleq_n_s16(__a, __b) __arm_vcmpleq_n_s16(__a, __b)
880 #define vcmpgtq_s16(__a, __b) __arm_vcmpgtq_s16(__a, __b)
881 #define vcmpgtq_n_s16(__a, __b) __arm_vcmpgtq_n_s16(__a, __b)
882 #define vcmpgeq_s16(__a, __b) __arm_vcmpgeq_s16(__a, __b)
883 #define vcmpgeq_n_s16(__a, __b) __arm_vcmpgeq_n_s16(__a, __b)
884 #define vcmpeqq_s16(__a, __b) __arm_vcmpeqq_s16(__a, __b)
885 #define vcmpeqq_n_s16(__a, __b) __arm_vcmpeqq_n_s16(__a, __b)
886 #define vqshluq_n_s16(__a, __imm) __arm_vqshluq_n_s16(__a, __imm)
887 #define vaddvq_p_s16(__a, __p) __arm_vaddvq_p_s16(__a, __p)
888 #define vsubq_s16(__a, __b) __arm_vsubq_s16(__a, __b)
889 #define vsubq_n_s16(__a, __b) __arm_vsubq_n_s16(__a, __b)
890 #define vshlq_r_s16(__a, __b) __arm_vshlq_r_s16(__a, __b)
891 #define vrshlq_s16(__a, __b) __arm_vrshlq_s16(__a, __b)
892 #define vrshlq_n_s16(__a, __b) __arm_vrshlq_n_s16(__a, __b)
893 #define vrmulhq_s16(__a, __b) __arm_vrmulhq_s16(__a, __b)
894 #define vrhaddq_s16(__a, __b) __arm_vrhaddq_s16(__a, __b)
895 #define vqsubq_s16(__a, __b) __arm_vqsubq_s16(__a, __b)
896 #define vqsubq_n_s16(__a, __b) __arm_vqsubq_n_s16(__a, __b)
897 #define vqshlq_s16(__a, __b) __arm_vqshlq_s16(__a, __b)
898 #define vqshlq_r_s16(__a, __b) __arm_vqshlq_r_s16(__a, __b)
899 #define vqrshlq_s16(__a, __b) __arm_vqrshlq_s16(__a, __b)
900 #define vqrshlq_n_s16(__a, __b) __arm_vqrshlq_n_s16(__a, __b)
901 #define vqrdmulhq_s16(__a, __b) __arm_vqrdmulhq_s16(__a, __b)
902 #define vqrdmulhq_n_s16(__a, __b) __arm_vqrdmulhq_n_s16(__a, __b)
903 #define vqdmulhq_s16(__a, __b) __arm_vqdmulhq_s16(__a, __b)
904 #define vqdmulhq_n_s16(__a, __b) __arm_vqdmulhq_n_s16(__a, __b)
905 #define vqaddq_s16(__a, __b) __arm_vqaddq_s16(__a, __b)
906 #define vqaddq_n_s16(__a, __b) __arm_vqaddq_n_s16(__a, __b)
907 #define vorrq_s16(__a, __b) __arm_vorrq_s16(__a, __b)
908 #define vornq_s16(__a, __b) __arm_vornq_s16(__a, __b)
909 #define vmulq_s16(__a, __b) __arm_vmulq_s16(__a, __b)
910 #define vmulq_n_s16(__a, __b) __arm_vmulq_n_s16(__a, __b)
911 #define vmulltq_int_s16(__a, __b) __arm_vmulltq_int_s16(__a, __b)
912 #define vmullbq_int_s16(__a, __b) __arm_vmullbq_int_s16(__a, __b)
913 #define vmulhq_s16(__a, __b) __arm_vmulhq_s16(__a, __b)
914 #define vmlsdavxq_s16(__a, __b) __arm_vmlsdavxq_s16(__a, __b)
915 #define vmlsdavq_s16(__a, __b) __arm_vmlsdavq_s16(__a, __b)
916 #define vmladavxq_s16(__a, __b) __arm_vmladavxq_s16(__a, __b)
917 #define vmladavq_s16(__a, __b) __arm_vmladavq_s16(__a, __b)
918 #define vminvq_s16(__a, __b) __arm_vminvq_s16(__a, __b)
919 #define vminq_s16(__a, __b) __arm_vminq_s16(__a, __b)
920 #define vmaxvq_s16(__a, __b) __arm_vmaxvq_s16(__a, __b)
921 #define vmaxq_s16(__a, __b) __arm_vmaxq_s16(__a, __b)
922 #define vhsubq_s16(__a, __b) __arm_vhsubq_s16(__a, __b)
923 #define vhsubq_n_s16(__a, __b) __arm_vhsubq_n_s16(__a, __b)
924 #define vhcaddq_rot90_s16(__a, __b) __arm_vhcaddq_rot90_s16(__a, __b)
925 #define vhcaddq_rot270_s16(__a, __b) __arm_vhcaddq_rot270_s16(__a, __b)
926 #define vhaddq_s16(__a, __b) __arm_vhaddq_s16(__a, __b)
927 #define vhaddq_n_s16(__a, __b) __arm_vhaddq_n_s16(__a, __b)
928 #define veorq_s16(__a, __b) __arm_veorq_s16(__a, __b)
929 #define vcaddq_rot90_s16(__a, __b) __arm_vcaddq_rot90_s16(__a, __b)
930 #define vcaddq_rot270_s16(__a, __b) __arm_vcaddq_rot270_s16(__a, __b)
931 #define vbrsrq_n_s16(__a, __b) __arm_vbrsrq_n_s16(__a, __b)
932 #define vbicq_s16(__a, __b) __arm_vbicq_s16(__a, __b)
933 #define vandq_s16(__a, __b) __arm_vandq_s16(__a, __b)
934 #define vaddvaq_s16(__a, __b) __arm_vaddvaq_s16(__a, __b)
935 #define vaddq_n_s16(__a, __b) __arm_vaddq_n_s16(__a, __b)
936 #define vabdq_s16(__a, __b) __arm_vabdq_s16(__a, __b)
937 #define vshlq_n_s16(__a, __imm) __arm_vshlq_n_s16(__a, __imm)
/* User-namespace aliases for MVE intrinsics: each macro forwards a public
   intrinsic name (suffix encodes element type: _s/_u/_f + width, _p for
   polynomial) to its __arm_-prefixed implementation with an identical
   argument list.  These aliases are only defined when
   __ARM_MVE_PRESERVE_USER_NAMESPACE is not set (see guard above).  */
938 #define vrshrq_n_s16(__a, __imm) __arm_vrshrq_n_s16(__a, __imm)
939 #define vqshlq_n_s16(__a, __imm) __arm_vqshlq_n_s16(__a, __imm)
/* Unsigned 32-bit element-wise binary operations.  */
940 #define vsubq_u32(__a, __b) __arm_vsubq_u32(__a, __b)
941 #define vsubq_n_u32(__a, __b) __arm_vsubq_n_u32(__a, __b)
942 #define vrmulhq_u32(__a, __b) __arm_vrmulhq_u32(__a, __b)
943 #define vrhaddq_u32(__a, __b) __arm_vrhaddq_u32(__a, __b)
944 #define vqsubq_u32(__a, __b) __arm_vqsubq_u32(__a, __b)
945 #define vqsubq_n_u32(__a, __b) __arm_vqsubq_n_u32(__a, __b)
946 #define vqaddq_u32(__a, __b) __arm_vqaddq_u32(__a, __b)
947 #define vqaddq_n_u32(__a, __b) __arm_vqaddq_n_u32(__a, __b)
948 #define vorrq_u32(__a, __b) __arm_vorrq_u32(__a, __b)
949 #define vornq_u32(__a, __b) __arm_vornq_u32(__a, __b)
950 #define vmulq_u32(__a, __b) __arm_vmulq_u32(__a, __b)
951 #define vmulq_n_u32(__a, __b) __arm_vmulq_n_u32(__a, __b)
952 #define vmulltq_int_u32(__a, __b) __arm_vmulltq_int_u32(__a, __b)
953 #define vmullbq_int_u32(__a, __b) __arm_vmullbq_int_u32(__a, __b)
954 #define vmulhq_u32(__a, __b) __arm_vmulhq_u32(__a, __b)
955 #define vmladavq_u32(__a, __b) __arm_vmladavq_u32(__a, __b)
956 #define vminvq_u32(__a, __b) __arm_vminvq_u32(__a, __b)
957 #define vminq_u32(__a, __b) __arm_vminq_u32(__a, __b)
958 #define vmaxvq_u32(__a, __b) __arm_vmaxvq_u32(__a, __b)
959 #define vmaxq_u32(__a, __b) __arm_vmaxq_u32(__a, __b)
960 #define vhsubq_u32(__a, __b) __arm_vhsubq_u32(__a, __b)
961 #define vhsubq_n_u32(__a, __b) __arm_vhsubq_n_u32(__a, __b)
962 #define vhaddq_u32(__a, __b) __arm_vhaddq_u32(__a, __b)
963 #define vhaddq_n_u32(__a, __b) __arm_vhaddq_n_u32(__a, __b)
964 #define veorq_u32(__a, __b) __arm_veorq_u32(__a, __b)
/* Unsigned 32-bit compares (result is a predicate) and complex adds.  */
965 #define vcmpneq_n_u32(__a, __b) __arm_vcmpneq_n_u32(__a, __b)
966 #define vcmphiq_u32(__a, __b) __arm_vcmphiq_u32(__a, __b)
967 #define vcmphiq_n_u32(__a, __b) __arm_vcmphiq_n_u32(__a, __b)
968 #define vcmpeqq_u32(__a, __b) __arm_vcmpeqq_u32(__a, __b)
969 #define vcmpeqq_n_u32(__a, __b) __arm_vcmpeqq_n_u32(__a, __b)
970 #define vcmpcsq_u32(__a, __b) __arm_vcmpcsq_u32(__a, __b)
971 #define vcmpcsq_n_u32(__a, __b) __arm_vcmpcsq_n_u32(__a, __b)
972 #define vcaddq_rot90_u32(__a, __b) __arm_vcaddq_rot90_u32(__a, __b)
973 #define vcaddq_rot270_u32(__a, __b) __arm_vcaddq_rot270_u32(__a, __b)
974 #define vbicq_u32(__a, __b) __arm_vbicq_u32(__a, __b)
975 #define vandq_u32(__a, __b) __arm_vandq_u32(__a, __b)
976 #define vaddvq_p_u32(__a, __p) __arm_vaddvq_p_u32(__a, __p)
977 #define vaddvaq_u32(__a, __b) __arm_vaddvaq_u32(__a, __b)
978 #define vaddq_n_u32(__a, __b) __arm_vaddq_n_u32(__a, __b)
979 #define vabdq_u32(__a, __b) __arm_vabdq_u32(__a, __b)
/* Unsigned 32-bit shifts by register/scalar.  */
980 #define vshlq_r_u32(__a, __b) __arm_vshlq_r_u32(__a, __b)
981 #define vrshlq_u32(__a, __b) __arm_vrshlq_u32(__a, __b)
982 #define vrshlq_n_u32(__a, __b) __arm_vrshlq_n_u32(__a, __b)
983 #define vqshlq_u32(__a, __b) __arm_vqshlq_u32(__a, __b)
984 #define vqshlq_r_u32(__a, __b) __arm_vqshlq_r_u32(__a, __b)
985 #define vqrshlq_u32(__a, __b) __arm_vqrshlq_u32(__a, __b)
986 #define vqrshlq_n_u32(__a, __b) __arm_vqrshlq_n_u32(__a, __b)
/* Signed 32-bit min/max-with-absolute-value reductions.  */
987 #define vminavq_s32(__a, __b) __arm_vminavq_s32(__a, __b)
988 #define vminaq_s32(__a, __b) __arm_vminaq_s32(__a, __b)
989 #define vmaxavq_s32(__a, __b) __arm_vmaxavq_s32(__a, __b)
990 #define vmaxaq_s32(__a, __b) __arm_vmaxaq_s32(__a, __b)
991 #define vbrsrq_n_u32(__a, __b) __arm_vbrsrq_n_u32(__a, __b)
/* Unsigned 32-bit shifts by immediate.  */
992 #define vshlq_n_u32(__a, __imm) __arm_vshlq_n_u32(__a, __imm)
993 #define vrshrq_n_u32(__a, __imm) __arm_vrshrq_n_u32(__a, __imm)
994 #define vqshlq_n_u32(__a, __imm) __arm_vqshlq_n_u32(__a, __imm)
/* Signed 32-bit compares (vector-vector and vector-scalar _n forms).  */
995 #define vcmpneq_n_s32(__a, __b) __arm_vcmpneq_n_s32(__a, __b)
996 #define vcmpltq_s32(__a, __b) __arm_vcmpltq_s32(__a, __b)
997 #define vcmpltq_n_s32(__a, __b) __arm_vcmpltq_n_s32(__a, __b)
998 #define vcmpleq_s32(__a, __b) __arm_vcmpleq_s32(__a, __b)
999 #define vcmpleq_n_s32(__a, __b) __arm_vcmpleq_n_s32(__a, __b)
1000 #define vcmpgtq_s32(__a, __b) __arm_vcmpgtq_s32(__a, __b)
1001 #define vcmpgtq_n_s32(__a, __b) __arm_vcmpgtq_n_s32(__a, __b)
1002 #define vcmpgeq_s32(__a, __b) __arm_vcmpgeq_s32(__a, __b)
1003 #define vcmpgeq_n_s32(__a, __b) __arm_vcmpgeq_n_s32(__a, __b)
1004 #define vcmpeqq_s32(__a, __b) __arm_vcmpeqq_s32(__a, __b)
1005 #define vcmpeqq_n_s32(__a, __b) __arm_vcmpeqq_n_s32(__a, __b)
1006 #define vqshluq_n_s32(__a, __imm) __arm_vqshluq_n_s32(__a, __imm)
1007 #define vaddvq_p_s32(__a, __p) __arm_vaddvq_p_s32(__a, __p)
/* Signed 32-bit element-wise binary operations and shifts.  */
1008 #define vsubq_s32(__a, __b) __arm_vsubq_s32(__a, __b)
1009 #define vsubq_n_s32(__a, __b) __arm_vsubq_n_s32(__a, __b)
1010 #define vshlq_r_s32(__a, __b) __arm_vshlq_r_s32(__a, __b)
1011 #define vrshlq_s32(__a, __b) __arm_vrshlq_s32(__a, __b)
1012 #define vrshlq_n_s32(__a, __b) __arm_vrshlq_n_s32(__a, __b)
1013 #define vrmulhq_s32(__a, __b) __arm_vrmulhq_s32(__a, __b)
1014 #define vrhaddq_s32(__a, __b) __arm_vrhaddq_s32(__a, __b)
1015 #define vqsubq_s32(__a, __b) __arm_vqsubq_s32(__a, __b)
1016 #define vqsubq_n_s32(__a, __b) __arm_vqsubq_n_s32(__a, __b)
1017 #define vqshlq_s32(__a, __b) __arm_vqshlq_s32(__a, __b)
1018 #define vqshlq_r_s32(__a, __b) __arm_vqshlq_r_s32(__a, __b)
1019 #define vqrshlq_s32(__a, __b) __arm_vqrshlq_s32(__a, __b)
1020 #define vqrshlq_n_s32(__a, __b) __arm_vqrshlq_n_s32(__a, __b)
1021 #define vqrdmulhq_s32(__a, __b) __arm_vqrdmulhq_s32(__a, __b)
1022 #define vqrdmulhq_n_s32(__a, __b) __arm_vqrdmulhq_n_s32(__a, __b)
1023 #define vqdmulhq_s32(__a, __b) __arm_vqdmulhq_s32(__a, __b)
1024 #define vqdmulhq_n_s32(__a, __b) __arm_vqdmulhq_n_s32(__a, __b)
1025 #define vqaddq_s32(__a, __b) __arm_vqaddq_s32(__a, __b)
1026 #define vqaddq_n_s32(__a, __b) __arm_vqaddq_n_s32(__a, __b)
1027 #define vorrq_s32(__a, __b) __arm_vorrq_s32(__a, __b)
1028 #define vornq_s32(__a, __b) __arm_vornq_s32(__a, __b)
1029 #define vmulq_s32(__a, __b) __arm_vmulq_s32(__a, __b)
1030 #define vmulq_n_s32(__a, __b) __arm_vmulq_n_s32(__a, __b)
1031 #define vmulltq_int_s32(__a, __b) __arm_vmulltq_int_s32(__a, __b)
1032 #define vmullbq_int_s32(__a, __b) __arm_vmullbq_int_s32(__a, __b)
1033 #define vmulhq_s32(__a, __b) __arm_vmulhq_s32(__a, __b)
1034 #define vmlsdavxq_s32(__a, __b) __arm_vmlsdavxq_s32(__a, __b)
1035 #define vmlsdavq_s32(__a, __b) __arm_vmlsdavq_s32(__a, __b)
1036 #define vmladavxq_s32(__a, __b) __arm_vmladavxq_s32(__a, __b)
1037 #define vmladavq_s32(__a, __b) __arm_vmladavq_s32(__a, __b)
1038 #define vminvq_s32(__a, __b) __arm_vminvq_s32(__a, __b)
1039 #define vminq_s32(__a, __b) __arm_vminq_s32(__a, __b)
1040 #define vmaxvq_s32(__a, __b) __arm_vmaxvq_s32(__a, __b)
1041 #define vmaxq_s32(__a, __b) __arm_vmaxq_s32(__a, __b)
1042 #define vhsubq_s32(__a, __b) __arm_vhsubq_s32(__a, __b)
1043 #define vhsubq_n_s32(__a, __b) __arm_vhsubq_n_s32(__a, __b)
1044 #define vhcaddq_rot90_s32(__a, __b) __arm_vhcaddq_rot90_s32(__a, __b)
1045 #define vhcaddq_rot270_s32(__a, __b) __arm_vhcaddq_rot270_s32(__a, __b)
1046 #define vhaddq_s32(__a, __b) __arm_vhaddq_s32(__a, __b)
1047 #define vhaddq_n_s32(__a, __b) __arm_vhaddq_n_s32(__a, __b)
1048 #define veorq_s32(__a, __b) __arm_veorq_s32(__a, __b)
1049 #define vcaddq_rot90_s32(__a, __b) __arm_vcaddq_rot90_s32(__a, __b)
1050 #define vcaddq_rot270_s32(__a, __b) __arm_vcaddq_rot270_s32(__a, __b)
1051 #define vbrsrq_n_s32(__a, __b) __arm_vbrsrq_n_s32(__a, __b)
1052 #define vbicq_s32(__a, __b) __arm_vbicq_s32(__a, __b)
1053 #define vandq_s32(__a, __b) __arm_vandq_s32(__a, __b)
1054 #define vaddvaq_s32(__a, __b) __arm_vaddvaq_s32(__a, __b)
1055 #define vaddq_n_s32(__a, __b) __arm_vaddq_n_s32(__a, __b)
1056 #define vabdq_s32(__a, __b) __arm_vabdq_s32(__a, __b)
1057 #define vshlq_n_s32(__a, __imm) __arm_vshlq_n_s32(__a, __imm)
1058 #define vrshrq_n_s32(__a, __imm) __arm_vrshrq_n_s32(__a, __imm)
1059 #define vqshlq_n_s32(__a, __imm) __arm_vqshlq_n_s32(__a, __imm)
/* Narrowing moves to 16-bit lanes, 8-bit polynomial multiplies, and
   widening shifts from 8-bit lanes.  */
1060 #define vqmovntq_u16(__a, __b) __arm_vqmovntq_u16(__a, __b)
1061 #define vqmovnbq_u16(__a, __b) __arm_vqmovnbq_u16(__a, __b)
1062 #define vmulltq_poly_p8(__a, __b) __arm_vmulltq_poly_p8(__a, __b)
1063 #define vmullbq_poly_p8(__a, __b) __arm_vmullbq_poly_p8(__a, __b)
1064 #define vmovntq_u16(__a, __b) __arm_vmovntq_u16(__a, __b)
1065 #define vmovnbq_u16(__a, __b) __arm_vmovnbq_u16(__a, __b)
1066 #define vmlaldavq_u16(__a, __b) __arm_vmlaldavq_u16(__a, __b)
1067 #define vqmovuntq_s16(__a, __b) __arm_vqmovuntq_s16(__a, __b)
1068 #define vqmovunbq_s16(__a, __b) __arm_vqmovunbq_s16(__a, __b)
1069 #define vshlltq_n_u8(__a, __imm) __arm_vshlltq_n_u8(__a, __imm)
1070 #define vshllbq_n_u8(__a, __imm) __arm_vshllbq_n_u8(__a, __imm)
1071 #define vorrq_n_u16(__a, __imm) __arm_vorrq_n_u16(__a, __imm)
1072 #define vbicq_n_u16(__a, __imm) __arm_vbicq_n_u16(__a, __imm)
/* Float16 compares, arithmetic, and complex multiplies; interleaved with
   signed 16-bit widening/narrowing operations.  */
1073 #define vcmpneq_n_f16(__a, __b) __arm_vcmpneq_n_f16(__a, __b)
1074 #define vcmpneq_f16(__a, __b) __arm_vcmpneq_f16(__a, __b)
1075 #define vcmpltq_n_f16(__a, __b) __arm_vcmpltq_n_f16(__a, __b)
1076 #define vcmpltq_f16(__a, __b) __arm_vcmpltq_f16(__a, __b)
1077 #define vcmpleq_n_f16(__a, __b) __arm_vcmpleq_n_f16(__a, __b)
1078 #define vcmpleq_f16(__a, __b) __arm_vcmpleq_f16(__a, __b)
1079 #define vcmpgtq_n_f16(__a, __b) __arm_vcmpgtq_n_f16(__a, __b)
1080 #define vcmpgtq_f16(__a, __b) __arm_vcmpgtq_f16(__a, __b)
1081 #define vcmpgeq_n_f16(__a, __b) __arm_vcmpgeq_n_f16(__a, __b)
1082 #define vcmpgeq_f16(__a, __b) __arm_vcmpgeq_f16(__a, __b)
1083 #define vcmpeqq_n_f16(__a, __b) __arm_vcmpeqq_n_f16(__a, __b)
1084 #define vcmpeqq_f16(__a, __b) __arm_vcmpeqq_f16(__a, __b)
1085 #define vsubq_f16(__a, __b) __arm_vsubq_f16(__a, __b)
1086 #define vqmovntq_s16(__a, __b) __arm_vqmovntq_s16(__a, __b)
1087 #define vqmovnbq_s16(__a, __b) __arm_vqmovnbq_s16(__a, __b)
1088 #define vqdmulltq_s16(__a, __b) __arm_vqdmulltq_s16(__a, __b)
1089 #define vqdmulltq_n_s16(__a, __b) __arm_vqdmulltq_n_s16(__a, __b)
1090 #define vqdmullbq_s16(__a, __b) __arm_vqdmullbq_s16(__a, __b)
1091 #define vqdmullbq_n_s16(__a, __b) __arm_vqdmullbq_n_s16(__a, __b)
1092 #define vorrq_f16(__a, __b) __arm_vorrq_f16(__a, __b)
1093 #define vornq_f16(__a, __b) __arm_vornq_f16(__a, __b)
1094 #define vmulq_n_f16(__a, __b) __arm_vmulq_n_f16(__a, __b)
1095 #define vmulq_f16(__a, __b) __arm_vmulq_f16(__a, __b)
1096 #define vmovntq_s16(__a, __b) __arm_vmovntq_s16(__a, __b)
1097 #define vmovnbq_s16(__a, __b) __arm_vmovnbq_s16(__a, __b)
1098 #define vmlsldavxq_s16(__a, __b) __arm_vmlsldavxq_s16(__a, __b)
1099 #define vmlsldavq_s16(__a, __b) __arm_vmlsldavq_s16(__a, __b)
1100 #define vmlaldavxq_s16(__a, __b) __arm_vmlaldavxq_s16(__a, __b)
1101 #define vmlaldavq_s16(__a, __b) __arm_vmlaldavq_s16(__a, __b)
1102 #define vminnmvq_f16(__a, __b) __arm_vminnmvq_f16(__a, __b)
1103 #define vminnmq_f16(__a, __b) __arm_vminnmq_f16(__a, __b)
1104 #define vminnmavq_f16(__a, __b) __arm_vminnmavq_f16(__a, __b)
1105 #define vminnmaq_f16(__a, __b) __arm_vminnmaq_f16(__a, __b)
1106 #define vmaxnmvq_f16(__a, __b) __arm_vmaxnmvq_f16(__a, __b)
1107 #define vmaxnmq_f16(__a, __b) __arm_vmaxnmq_f16(__a, __b)
1108 #define vmaxnmavq_f16(__a, __b) __arm_vmaxnmavq_f16(__a, __b)
1109 #define vmaxnmaq_f16(__a, __b) __arm_vmaxnmaq_f16(__a, __b)
1110 #define veorq_f16(__a, __b) __arm_veorq_f16(__a, __b)
1111 #define vcmulq_rot90_f16(__a, __b) __arm_vcmulq_rot90_f16(__a, __b)
1112 #define vcmulq_rot270_f16(__a, __b) __arm_vcmulq_rot270_f16(__a, __b)
1113 #define vcmulq_rot180_f16(__a, __b) __arm_vcmulq_rot180_f16(__a, __b)
1114 #define vcmulq_f16(__a, __b) __arm_vcmulq_f16(__a, __b)
1115 #define vcaddq_rot90_f16(__a, __b) __arm_vcaddq_rot90_f16(__a, __b)
1116 #define vcaddq_rot270_f16(__a, __b) __arm_vcaddq_rot270_f16(__a, __b)
1117 #define vbicq_f16(__a, __b) __arm_vbicq_f16(__a, __b)
1118 #define vandq_f16(__a, __b) __arm_vandq_f16(__a, __b)
1119 #define vaddq_n_f16(__a, __b) __arm_vaddq_n_f16(__a, __b)
1120 #define vabdq_f16(__a, __b) __arm_vabdq_f16(__a, __b)
1121 #define vshlltq_n_s8(__a, __imm) __arm_vshlltq_n_s8(__a, __imm)
1122 #define vshllbq_n_s8(__a, __imm) __arm_vshllbq_n_s8(__a, __imm)
1123 #define vorrq_n_s16(__a, __imm) __arm_vorrq_n_s16(__a, __imm)
1124 #define vbicq_n_s16(__a, __imm) __arm_vbicq_n_s16(__a, __imm)
/* Narrowing moves to 32-bit lanes, 16-bit polynomial multiplies, and
   widening shifts from 16-bit lanes.  */
1125 #define vqmovntq_u32(__a, __b) __arm_vqmovntq_u32(__a, __b)
1126 #define vqmovnbq_u32(__a, __b) __arm_vqmovnbq_u32(__a, __b)
1127 #define vmulltq_poly_p16(__a, __b) __arm_vmulltq_poly_p16(__a, __b)
1128 #define vmullbq_poly_p16(__a, __b) __arm_vmullbq_poly_p16(__a, __b)
1129 #define vmovntq_u32(__a, __b) __arm_vmovntq_u32(__a, __b)
1130 #define vmovnbq_u32(__a, __b) __arm_vmovnbq_u32(__a, __b)
1131 #define vmlaldavq_u32(__a, __b) __arm_vmlaldavq_u32(__a, __b)
1132 #define vqmovuntq_s32(__a, __b) __arm_vqmovuntq_s32(__a, __b)
1133 #define vqmovunbq_s32(__a, __b) __arm_vqmovunbq_s32(__a, __b)
1134 #define vshlltq_n_u16(__a, __imm) __arm_vshlltq_n_u16(__a, __imm)
1135 #define vshllbq_n_u16(__a, __imm) __arm_vshllbq_n_u16(__a, __imm)
1136 #define vorrq_n_u32(__a, __imm) __arm_vorrq_n_u32(__a, __imm)
1137 #define vbicq_n_u32(__a, __imm) __arm_vbicq_n_u32(__a, __imm)
/* Float32 compares, arithmetic, and complex multiplies; interleaved with
   signed 32-bit widening/narrowing operations.  */
1138 #define vcmpneq_n_f32(__a, __b) __arm_vcmpneq_n_f32(__a, __b)
1139 #define vcmpneq_f32(__a, __b) __arm_vcmpneq_f32(__a, __b)
1140 #define vcmpltq_n_f32(__a, __b) __arm_vcmpltq_n_f32(__a, __b)
1141 #define vcmpltq_f32(__a, __b) __arm_vcmpltq_f32(__a, __b)
1142 #define vcmpleq_n_f32(__a, __b) __arm_vcmpleq_n_f32(__a, __b)
1143 #define vcmpleq_f32(__a, __b) __arm_vcmpleq_f32(__a, __b)
1144 #define vcmpgtq_n_f32(__a, __b) __arm_vcmpgtq_n_f32(__a, __b)
1145 #define vcmpgtq_f32(__a, __b) __arm_vcmpgtq_f32(__a, __b)
1146 #define vcmpgeq_n_f32(__a, __b) __arm_vcmpgeq_n_f32(__a, __b)
1147 #define vcmpgeq_f32(__a, __b) __arm_vcmpgeq_f32(__a, __b)
1148 #define vcmpeqq_n_f32(__a, __b) __arm_vcmpeqq_n_f32(__a, __b)
1149 #define vcmpeqq_f32(__a, __b) __arm_vcmpeqq_f32(__a, __b)
1150 #define vsubq_f32(__a, __b) __arm_vsubq_f32(__a, __b)
1151 #define vqmovntq_s32(__a, __b) __arm_vqmovntq_s32(__a, __b)
1152 #define vqmovnbq_s32(__a, __b) __arm_vqmovnbq_s32(__a, __b)
1153 #define vqdmulltq_s32(__a, __b) __arm_vqdmulltq_s32(__a, __b)
1154 #define vqdmulltq_n_s32(__a, __b) __arm_vqdmulltq_n_s32(__a, __b)
1155 #define vqdmullbq_s32(__a, __b) __arm_vqdmullbq_s32(__a, __b)
1156 #define vqdmullbq_n_s32(__a, __b) __arm_vqdmullbq_n_s32(__a, __b)
1157 #define vorrq_f32(__a, __b) __arm_vorrq_f32(__a, __b)
1158 #define vornq_f32(__a, __b) __arm_vornq_f32(__a, __b)
1159 #define vmulq_n_f32(__a, __b) __arm_vmulq_n_f32(__a, __b)
1160 #define vmulq_f32(__a, __b) __arm_vmulq_f32(__a, __b)
1161 #define vmovntq_s32(__a, __b) __arm_vmovntq_s32(__a, __b)
1162 #define vmovnbq_s32(__a, __b) __arm_vmovnbq_s32(__a, __b)
1163 #define vmlsldavxq_s32(__a, __b) __arm_vmlsldavxq_s32(__a, __b)
1164 #define vmlsldavq_s32(__a, __b) __arm_vmlsldavq_s32(__a, __b)
1165 #define vmlaldavxq_s32(__a, __b) __arm_vmlaldavxq_s32(__a, __b)
1166 #define vmlaldavq_s32(__a, __b) __arm_vmlaldavq_s32(__a, __b)
1167 #define vminnmvq_f32(__a, __b) __arm_vminnmvq_f32(__a, __b)
1168 #define vminnmq_f32(__a, __b) __arm_vminnmq_f32(__a, __b)
1169 #define vminnmavq_f32(__a, __b) __arm_vminnmavq_f32(__a, __b)
1170 #define vminnmaq_f32(__a, __b) __arm_vminnmaq_f32(__a, __b)
1171 #define vmaxnmvq_f32(__a, __b) __arm_vmaxnmvq_f32(__a, __b)
1172 #define vmaxnmq_f32(__a, __b) __arm_vmaxnmq_f32(__a, __b)
1173 #define vmaxnmavq_f32(__a, __b) __arm_vmaxnmavq_f32(__a, __b)
1174 #define vmaxnmaq_f32(__a, __b) __arm_vmaxnmaq_f32(__a, __b)
1175 #define veorq_f32(__a, __b) __arm_veorq_f32(__a, __b)
1176 #define vcmulq_rot90_f32(__a, __b) __arm_vcmulq_rot90_f32(__a, __b)
1177 #define vcmulq_rot270_f32(__a, __b) __arm_vcmulq_rot270_f32(__a, __b)
1178 #define vcmulq_rot180_f32(__a, __b) __arm_vcmulq_rot180_f32(__a, __b)
1179 #define vcmulq_f32(__a, __b) __arm_vcmulq_f32(__a, __b)
1180 #define vcaddq_rot90_f32(__a, __b) __arm_vcaddq_rot90_f32(__a, __b)
1181 #define vcaddq_rot270_f32(__a, __b) __arm_vcaddq_rot270_f32(__a, __b)
1182 #define vbicq_f32(__a, __b) __arm_vbicq_f32(__a, __b)
1183 #define vandq_f32(__a, __b) __arm_vandq_f32(__a, __b)
1184 #define vaddq_n_f32(__a, __b) __arm_vaddq_n_f32(__a, __b)
1185 #define vabdq_f32(__a, __b) __arm_vabdq_f32(__a, __b)
1186 #define vshlltq_n_s16(__a, __imm) __arm_vshlltq_n_s16(__a, __imm)
1187 #define vshllbq_n_s16(__a, __imm) __arm_vshllbq_n_s16(__a, __imm)
1188 #define vorrq_n_s32(__a, __imm) __arm_vorrq_n_s32(__a, __imm)
1189 #define vbicq_n_s32(__a, __imm) __arm_vbicq_n_s32(__a, __imm)
/* Long (accumulating) multiply-add reductions, VCTP tail predication,
   and float16<->float32 conversions.  */
1190 #define vrmlaldavhq_u32(__a, __b) __arm_vrmlaldavhq_u32(__a, __b)
1191 #define vctp8q_m(__a, __p) __arm_vctp8q_m(__a, __p)
1192 #define vctp64q_m(__a, __p) __arm_vctp64q_m(__a, __p)
1193 #define vctp32q_m(__a, __p) __arm_vctp32q_m(__a, __p)
1194 #define vctp16q_m(__a, __p) __arm_vctp16q_m(__a, __p)
1195 #define vaddlvaq_u32(__a, __b) __arm_vaddlvaq_u32(__a, __b)
1196 #define vrmlsldavhxq_s32(__a, __b) __arm_vrmlsldavhxq_s32(__a, __b)
1197 #define vrmlsldavhq_s32(__a, __b) __arm_vrmlsldavhq_s32(__a, __b)
1198 #define vrmlaldavhxq_s32(__a, __b) __arm_vrmlaldavhxq_s32(__a, __b)
1199 #define vrmlaldavhq_s32(__a, __b) __arm_vrmlaldavhq_s32(__a, __b)
1200 #define vcvttq_f16_f32(__a, __b) __arm_vcvttq_f16_f32(__a, __b)
1201 #define vcvtbq_f16_f32(__a, __b) __arm_vcvtbq_f16_f32(__a, __b)
1202 #define vaddlvaq_s32(__a, __b) __arm_vaddlvaq_s32(__a, __b)
/* Three-operand intrinsics (accumulators) and the first predicated
   (_m / _p suffixed) variants.  */
1203 #define vabavq_s8(__a, __b, __c) __arm_vabavq_s8(__a, __b, __c)
1204 #define vabavq_s16(__a, __b, __c) __arm_vabavq_s16(__a, __b, __c)
1205 #define vabavq_s32(__a, __b, __c) __arm_vabavq_s32(__a, __b, __c)
1206 #define vbicq_m_n_s16(__a, __imm, __p) __arm_vbicq_m_n_s16(__a, __imm, __p)
1207 #define vbicq_m_n_s32(__a, __imm, __p) __arm_vbicq_m_n_s32(__a, __imm, __p)
1208 #define vbicq_m_n_u16(__a, __imm, __p) __arm_vbicq_m_n_u16(__a, __imm, __p)
1209 #define vbicq_m_n_u32(__a, __imm, __p) __arm_vbicq_m_n_u32(__a, __imm, __p)
1210 #define vcmpeqq_m_f16(__a, __b, __p) __arm_vcmpeqq_m_f16(__a, __b, __p)
1211 #define vcmpeqq_m_f32(__a, __b, __p) __arm_vcmpeqq_m_f32(__a, __b, __p)
1212 #define vcvtaq_m_s16_f16(__inactive, __a, __p) __arm_vcvtaq_m_s16_f16(__inactive, __a, __p)
1213 #define vcvtaq_m_u16_f16(__inactive, __a, __p) __arm_vcvtaq_m_u16_f16(__inactive, __a, __p)
1214 #define vcvtaq_m_s32_f32(__inactive, __a, __p) __arm_vcvtaq_m_s32_f32(__inactive, __a, __p)
1215 #define vcvtaq_m_u32_f32(__inactive, __a, __p) __arm_vcvtaq_m_u32_f32(__inactive, __a, __p)
1216 #define vcvtq_m_f16_s16(__inactive, __a, __p) __arm_vcvtq_m_f16_s16(__inactive, __a, __p)
1217 #define vcvtq_m_f16_u16(__inactive, __a, __p) __arm_vcvtq_m_f16_u16(__inactive, __a, __p)
1218 #define vcvtq_m_f32_s32(__inactive, __a, __p) __arm_vcvtq_m_f32_s32(__inactive, __a, __p)
1219 #define vcvtq_m_f32_u32(__inactive, __a, __p) __arm_vcvtq_m_f32_u32(__inactive, __a, __p)
1220 #define vqrshrnbq_n_s16(__a, __b, __imm) __arm_vqrshrnbq_n_s16(__a, __b, __imm)
1221 #define vqrshrnbq_n_u16(__a, __b, __imm) __arm_vqrshrnbq_n_u16(__a, __b, __imm)
1222 #define vqrshrnbq_n_s32(__a, __b, __imm) __arm_vqrshrnbq_n_s32(__a, __b, __imm)
1223 #define vqrshrnbq_n_u32(__a, __b, __imm) __arm_vqrshrnbq_n_u32(__a, __b, __imm)
1224 #define vqrshrunbq_n_s16(__a, __b, __imm) __arm_vqrshrunbq_n_s16(__a, __b, __imm)
1225 #define vqrshrunbq_n_s32(__a, __b, __imm) __arm_vqrshrunbq_n_s32(__a, __b, __imm)
1226 #define vrmlaldavhaq_s32(__a, __b, __c) __arm_vrmlaldavhaq_s32(__a, __b, __c)
1227 #define vrmlaldavhaq_u32(__a, __b, __c) __arm_vrmlaldavhaq_u32(__a, __b, __c)
1228 #define vshlcq_s8(__a, __b, __imm) __arm_vshlcq_s8(__a, __b, __imm)
1229 #define vshlcq_u8(__a, __b, __imm) __arm_vshlcq_u8(__a, __b, __imm)
1230 #define vshlcq_s16(__a, __b, __imm) __arm_vshlcq_s16(__a, __b, __imm)
1231 #define vshlcq_u16(__a, __b, __imm) __arm_vshlcq_u16(__a, __b, __imm)
1232 #define vshlcq_s32(__a, __b, __imm) __arm_vshlcq_s32(__a, __b, __imm)
1233 #define vshlcq_u32(__a, __b, __imm) __arm_vshlcq_u32(__a, __b, __imm)
1234 #define vabavq_u8(__a, __b, __c) __arm_vabavq_u8(__a, __b, __c)
1235 #define vabavq_u16(__a, __b, __c) __arm_vabavq_u16(__a, __b, __c)
1236 #define vabavq_u32(__a, __b, __c) __arm_vabavq_u32(__a, __b, __c)
/* Predicated operations on 8-bit element vectors (select, predicated
   compares, multiply-accumulate, predicated shifts).  */
1237 #define vpselq_u8(__a, __b, __p) __arm_vpselq_u8(__a, __b, __p)
1238 #define vpselq_s8(__a, __b, __p) __arm_vpselq_s8(__a, __b, __p)
1239 #define vrev64q_m_u8(__inactive, __a, __p) __arm_vrev64q_m_u8(__inactive, __a, __p)
1240 #define vmvnq_m_u8(__inactive, __a, __p) __arm_vmvnq_m_u8(__inactive, __a, __p)
1241 #define vmlasq_n_u8(__a, __b, __c) __arm_vmlasq_n_u8(__a, __b, __c)
1242 #define vmlaq_n_u8(__a, __b, __c) __arm_vmlaq_n_u8(__a, __b, __c)
1243 #define vmladavq_p_u8(__a, __b, __p) __arm_vmladavq_p_u8(__a, __b, __p)
1244 #define vmladavaq_u8(__a, __b, __c) __arm_vmladavaq_u8(__a, __b, __c)
1245 #define vminvq_p_u8(__a, __b, __p) __arm_vminvq_p_u8(__a, __b, __p)
1246 #define vmaxvq_p_u8(__a, __b, __p) __arm_vmaxvq_p_u8(__a, __b, __p)
1247 #define vdupq_m_n_u8(__inactive, __a, __p) __arm_vdupq_m_n_u8(__inactive, __a, __p)
1248 #define vcmpneq_m_u8(__a, __b, __p) __arm_vcmpneq_m_u8(__a, __b, __p)
1249 #define vcmpneq_m_n_u8(__a, __b, __p) __arm_vcmpneq_m_n_u8(__a, __b, __p)
1250 #define vcmphiq_m_u8(__a, __b, __p) __arm_vcmphiq_m_u8(__a, __b, __p)
1251 #define vcmphiq_m_n_u8(__a, __b, __p) __arm_vcmphiq_m_n_u8(__a, __b, __p)
1252 #define vcmpeqq_m_u8(__a, __b, __p) __arm_vcmpeqq_m_u8(__a, __b, __p)
1253 #define vcmpeqq_m_n_u8(__a, __b, __p) __arm_vcmpeqq_m_n_u8(__a, __b, __p)
1254 #define vcmpcsq_m_u8(__a, __b, __p) __arm_vcmpcsq_m_u8(__a, __b, __p)
1255 #define vcmpcsq_m_n_u8(__a, __b, __p) __arm_vcmpcsq_m_n_u8(__a, __b, __p)
1256 #define vclzq_m_u8(__inactive, __a, __p) __arm_vclzq_m_u8(__inactive, __a, __p)
1257 #define vaddvaq_p_u8(__a, __b, __p) __arm_vaddvaq_p_u8(__a, __b, __p)
1258 #define vsriq_n_u8(__a, __b, __imm) __arm_vsriq_n_u8(__a, __b, __imm)
1259 #define vsliq_n_u8(__a, __b, __imm) __arm_vsliq_n_u8(__a, __b, __imm)
1260 #define vshlq_m_r_u8(__a, __b, __p) __arm_vshlq_m_r_u8(__a, __b, __p)
1261 #define vrshlq_m_n_u8(__a, __b, __p) __arm_vrshlq_m_n_u8(__a, __b, __p)
1262 #define vqshlq_m_r_u8(__a, __b, __p) __arm_vqshlq_m_r_u8(__a, __b, __p)
1263 #define vqrshlq_m_n_u8(__a, __b, __p) __arm_vqrshlq_m_n_u8(__a, __b, __p)
1264 #define vminavq_p_s8(__a, __b, __p) __arm_vminavq_p_s8(__a, __b, __p)
1265 #define vminaq_m_s8(__a, __b, __p) __arm_vminaq_m_s8(__a, __b, __p)
1266 #define vmaxavq_p_s8(__a, __b, __p) __arm_vmaxavq_p_s8(__a, __b, __p)
1267 #define vmaxaq_m_s8(__a, __b, __p) __arm_vmaxaq_m_s8(__a, __b, __p)
1268 #define vcmpneq_m_s8(__a, __b, __p) __arm_vcmpneq_m_s8(__a, __b, __p)
1269 #define vcmpneq_m_n_s8(__a, __b, __p) __arm_vcmpneq_m_n_s8(__a, __b, __p)
1270 #define vcmpltq_m_s8(__a, __b, __p) __arm_vcmpltq_m_s8(__a, __b, __p)
1271 #define vcmpltq_m_n_s8(__a, __b, __p) __arm_vcmpltq_m_n_s8(__a, __b, __p)
1272 #define vcmpleq_m_s8(__a, __b, __p) __arm_vcmpleq_m_s8(__a, __b, __p)
1273 #define vcmpleq_m_n_s8(__a, __b, __p) __arm_vcmpleq_m_n_s8(__a, __b, __p)
1274 #define vcmpgtq_m_s8(__a, __b, __p) __arm_vcmpgtq_m_s8(__a, __b, __p)
1275 #define vcmpgtq_m_n_s8(__a, __b, __p) __arm_vcmpgtq_m_n_s8(__a, __b, __p)
1276 #define vcmpgeq_m_s8(__a, __b, __p) __arm_vcmpgeq_m_s8(__a, __b, __p)
1277 #define vcmpgeq_m_n_s8(__a, __b, __p) __arm_vcmpgeq_m_n_s8(__a, __b, __p)
1278 #define vcmpeqq_m_s8(__a, __b, __p) __arm_vcmpeqq_m_s8(__a, __b, __p)
1279 #define vcmpeqq_m_n_s8(__a, __b, __p) __arm_vcmpeqq_m_n_s8(__a, __b, __p)
1280 #define vshlq_m_r_s8(__a, __b, __p) __arm_vshlq_m_r_s8(__a, __b, __p)
1281 #define vrshlq_m_n_s8(__a, __b, __p) __arm_vrshlq_m_n_s8(__a, __b, __p)
1282 #define vrev64q_m_s8(__inactive, __a, __p) __arm_vrev64q_m_s8(__inactive, __a, __p)
1283 #define vqshlq_m_r_s8(__a, __b, __p) __arm_vqshlq_m_r_s8(__a, __b, __p)
1284 #define vqrshlq_m_n_s8(__a, __b, __p) __arm_vqrshlq_m_n_s8(__a, __b, __p)
1285 #define vqnegq_m_s8(__inactive, __a, __p) __arm_vqnegq_m_s8(__inactive, __a, __p)
1286 #define vqabsq_m_s8(__inactive, __a, __p) __arm_vqabsq_m_s8(__inactive, __a, __p)
1287 #define vnegq_m_s8(__inactive, __a, __p) __arm_vnegq_m_s8(__inactive, __a, __p)
1288 #define vmvnq_m_s8(__inactive, __a, __p) __arm_vmvnq_m_s8(__inactive, __a, __p)
1289 #define vmlsdavxq_p_s8(__a, __b, __p) __arm_vmlsdavxq_p_s8(__a, __b, __p)
1290 #define vmlsdavq_p_s8(__a, __b, __p) __arm_vmlsdavq_p_s8(__a, __b, __p)
1291 #define vmladavxq_p_s8(__a, __b, __p) __arm_vmladavxq_p_s8(__a, __b, __p)
1292 #define vmladavq_p_s8(__a, __b, __p) __arm_vmladavq_p_s8(__a, __b, __p)
1293 #define vminvq_p_s8(__a, __b, __p) __arm_vminvq_p_s8(__a, __b, __p)
1294 #define vmaxvq_p_s8(__a, __b, __p) __arm_vmaxvq_p_s8(__a, __b, __p)
1295 #define vdupq_m_n_s8(__inactive, __a, __p) __arm_vdupq_m_n_s8(__inactive, __a, __p)
1296 #define vclzq_m_s8(__inactive, __a, __p) __arm_vclzq_m_s8(__inactive, __a, __p)
1297 #define vclsq_m_s8(__inactive, __a, __p) __arm_vclsq_m_s8(__inactive, __a, __p)
1298 #define vaddvaq_p_s8(__a, __b, __p) __arm_vaddvaq_p_s8(__a, __b, __p)
1299 #define vabsq_m_s8(__inactive, __a, __p) __arm_vabsq_m_s8(__inactive, __a, __p)
1300 #define vqrdmlsdhxq_s8(__inactive, __a, __b) __arm_vqrdmlsdhxq_s8(__inactive, __a, __b)
1301 #define vqrdmlsdhq_s8(__inactive, __a, __b) __arm_vqrdmlsdhq_s8(__inactive, __a, __b)
1302 #define vqrdmlashq_n_s8(__a, __b, __c) __arm_vqrdmlashq_n_s8(__a, __b, __c)
1303 #define vqrdmlahq_n_s8(__a, __b, __c) __arm_vqrdmlahq_n_s8(__a, __b, __c)
1304 #define vqrdmladhxq_s8(__inactive, __a, __b) __arm_vqrdmladhxq_s8(__inactive, __a, __b)
1305 #define vqrdmladhq_s8(__inactive, __a, __b) __arm_vqrdmladhq_s8(__inactive, __a, __b)
1306 #define vqdmlsdhxq_s8(__inactive, __a, __b) __arm_vqdmlsdhxq_s8(__inactive, __a, __b)
1307 #define vqdmlsdhq_s8(__inactive, __a, __b) __arm_vqdmlsdhq_s8(__inactive, __a, __b)
1308 #define vqdmlahq_n_s8(__a, __b, __c) __arm_vqdmlahq_n_s8(__a, __b, __c)
1309 #define vqdmlashq_n_s8(__a, __b, __c) __arm_vqdmlashq_n_s8(__a, __b, __c)
1310 #define vqdmladhxq_s8(__inactive, __a, __b) __arm_vqdmladhxq_s8(__inactive, __a, __b)
1311 #define vqdmladhq_s8(__inactive, __a, __b) __arm_vqdmladhq_s8(__inactive, __a, __b)
1312 #define vmlsdavaxq_s8(__a, __b, __c) __arm_vmlsdavaxq_s8(__a, __b, __c)
1313 #define vmlsdavaq_s8(__a, __b, __c) __arm_vmlsdavaq_s8(__a, __b, __c)
1314 #define vmlasq_n_s8(__a, __b, __c) __arm_vmlasq_n_s8(__a, __b, __c)
1315 #define vmlaq_n_s8(__a, __b, __c) __arm_vmlaq_n_s8(__a, __b, __c)
1316 #define vmladavaxq_s8(__a, __b, __c) __arm_vmladavaxq_s8(__a, __b, __c)
1317 #define vmladavaq_s8(__a, __b, __c) __arm_vmladavaq_s8(__a, __b, __c)
1318 #define vsriq_n_s8(__a, __b, __imm) __arm_vsriq_n_s8(__a, __b, __imm)
1319 #define vsliq_n_s8(__a, __b, __imm) __arm_vsliq_n_s8(__a, __b, __imm)
/* Predicated operations on 16-bit element vectors (same shape as the
   8-bit group above).  */
1320 #define vpselq_u16(__a, __b, __p) __arm_vpselq_u16(__a, __b, __p)
1321 #define vpselq_s16(__a, __b, __p) __arm_vpselq_s16(__a, __b, __p)
1322 #define vrev64q_m_u16(__inactive, __a, __p) __arm_vrev64q_m_u16(__inactive, __a, __p)
1323 #define vmvnq_m_u16(__inactive, __a, __p) __arm_vmvnq_m_u16(__inactive, __a, __p)
1324 #define vmlasq_n_u16(__a, __b, __c) __arm_vmlasq_n_u16(__a, __b, __c)
1325 #define vmlaq_n_u16(__a, __b, __c) __arm_vmlaq_n_u16(__a, __b, __c)
1326 #define vmladavq_p_u16(__a, __b, __p) __arm_vmladavq_p_u16(__a, __b, __p)
1327 #define vmladavaq_u16(__a, __b, __c) __arm_vmladavaq_u16(__a, __b, __c)
1328 #define vminvq_p_u16(__a, __b, __p) __arm_vminvq_p_u16(__a, __b, __p)
1329 #define vmaxvq_p_u16(__a, __b, __p) __arm_vmaxvq_p_u16(__a, __b, __p)
1330 #define vdupq_m_n_u16(__inactive, __a, __p) __arm_vdupq_m_n_u16(__inactive, __a, __p)
1331 #define vcmpneq_m_u16(__a, __b, __p) __arm_vcmpneq_m_u16(__a, __b, __p)
1332 #define vcmpneq_m_n_u16(__a, __b, __p) __arm_vcmpneq_m_n_u16(__a, __b, __p)
1333 #define vcmphiq_m_u16(__a, __b, __p) __arm_vcmphiq_m_u16(__a, __b, __p)
1334 #define vcmphiq_m_n_u16(__a, __b, __p) __arm_vcmphiq_m_n_u16(__a, __b, __p)
1335 #define vcmpeqq_m_u16(__a, __b, __p) __arm_vcmpeqq_m_u16(__a, __b, __p)
1336 #define vcmpeqq_m_n_u16(__a, __b, __p) __arm_vcmpeqq_m_n_u16(__a, __b, __p)
1337 #define vcmpcsq_m_u16(__a, __b, __p) __arm_vcmpcsq_m_u16(__a, __b, __p)
1338 #define vcmpcsq_m_n_u16(__a, __b, __p) __arm_vcmpcsq_m_n_u16(__a, __b, __p)
1339 #define vclzq_m_u16(__inactive, __a, __p) __arm_vclzq_m_u16(__inactive, __a, __p)
1340 #define vaddvaq_p_u16(__a, __b, __p) __arm_vaddvaq_p_u16(__a, __b, __p)
1341 #define vsriq_n_u16(__a, __b, __imm) __arm_vsriq_n_u16(__a, __b, __imm)
1342 #define vsliq_n_u16(__a, __b, __imm) __arm_vsliq_n_u16(__a, __b, __imm)
1343 #define vshlq_m_r_u16(__a, __b, __p) __arm_vshlq_m_r_u16(__a, __b, __p)
1344 #define vrshlq_m_n_u16(__a, __b, __p) __arm_vrshlq_m_n_u16(__a, __b, __p)
1345 #define vqshlq_m_r_u16(__a, __b, __p) __arm_vqshlq_m_r_u16(__a, __b, __p)
1346 #define vqrshlq_m_n_u16(__a, __b, __p) __arm_vqrshlq_m_n_u16(__a, __b, __p)
1347 #define vminavq_p_s16(__a, __b, __p) __arm_vminavq_p_s16(__a, __b, __p)
1348 #define vminaq_m_s16(__a, __b, __p) __arm_vminaq_m_s16(__a, __b, __p)
1349 #define vmaxavq_p_s16(__a, __b, __p) __arm_vmaxavq_p_s16(__a, __b, __p)
1350 #define vmaxaq_m_s16(__a, __b, __p) __arm_vmaxaq_m_s16(__a, __b, __p)
1351 #define vcmpneq_m_s16(__a, __b, __p) __arm_vcmpneq_m_s16(__a, __b, __p)
1352 #define vcmpneq_m_n_s16(__a, __b, __p) __arm_vcmpneq_m_n_s16(__a, __b, __p)
1353 #define vcmpltq_m_s16(__a, __b, __p) __arm_vcmpltq_m_s16(__a, __b, __p)
1354 #define vcmpltq_m_n_s16(__a, __b, __p) __arm_vcmpltq_m_n_s16(__a, __b, __p)
1355 #define vcmpleq_m_s16(__a, __b, __p) __arm_vcmpleq_m_s16(__a, __b, __p)
1356 #define vcmpleq_m_n_s16(__a, __b, __p) __arm_vcmpleq_m_n_s16(__a, __b, __p)
1357 #define vcmpgtq_m_s16(__a, __b, __p) __arm_vcmpgtq_m_s16(__a, __b, __p)
1358 #define vcmpgtq_m_n_s16(__a, __b, __p) __arm_vcmpgtq_m_n_s16(__a, __b, __p)
1359 #define vcmpgeq_m_s16(__a, __b, __p) __arm_vcmpgeq_m_s16(__a, __b, __p)
1360 #define vcmpgeq_m_n_s16(__a, __b, __p) __arm_vcmpgeq_m_n_s16(__a, __b, __p)
1361 #define vcmpeqq_m_s16(__a, __b, __p) __arm_vcmpeqq_m_s16(__a, __b, __p)
1362 #define vcmpeqq_m_n_s16(__a, __b, __p) __arm_vcmpeqq_m_n_s16(__a, __b, __p)
1363 #define vshlq_m_r_s16(__a, __b, __p) __arm_vshlq_m_r_s16(__a, __b, __p)
1364 #define vrshlq_m_n_s16(__a, __b, __p) __arm_vrshlq_m_n_s16(__a, __b, __p)
1365 #define vrev64q_m_s16(__inactive, __a, __p) __arm_vrev64q_m_s16(__inactive, __a, __p)
1366 #define vqshlq_m_r_s16(__a, __b, __p) __arm_vqshlq_m_r_s16(__a, __b, __p)
1367 #define vqrshlq_m_n_s16(__a, __b, __p) __arm_vqrshlq_m_n_s16(__a, __b, __p)
1368 #define vqnegq_m_s16(__inactive, __a, __p) __arm_vqnegq_m_s16(__inactive, __a, __p)
1369 #define vqabsq_m_s16(__inactive, __a, __p) __arm_vqabsq_m_s16(__inactive, __a, __p)
1370 #define vnegq_m_s16(__inactive, __a, __p) __arm_vnegq_m_s16(__inactive, __a, __p)
1371 #define vmvnq_m_s16(__inactive, __a, __p) __arm_vmvnq_m_s16(__inactive, __a, __p)
1372 #define vmlsdavxq_p_s16(__a, __b, __p) __arm_vmlsdavxq_p_s16(__a, __b, __p)
1373 #define vmlsdavq_p_s16(__a, __b, __p) __arm_vmlsdavq_p_s16(__a, __b, __p)
1374 #define vmladavxq_p_s16(__a, __b, __p) __arm_vmladavxq_p_s16(__a, __b, __p)
1375 #define vmladavq_p_s16(__a, __b, __p) __arm_vmladavq_p_s16(__a, __b, __p)
1376 #define vminvq_p_s16(__a, __b, __p) __arm_vminvq_p_s16(__a, __b, __p)
1377 #define vmaxvq_p_s16(__a, __b, __p) __arm_vmaxvq_p_s16(__a, __b, __p)
1378 #define vdupq_m_n_s16(__inactive, __a, __p) __arm_vdupq_m_n_s16(__inactive, __a, __p)
1379 #define vclzq_m_s16(__inactive, __a, __p) __arm_vclzq_m_s16(__inactive, __a, __p)
1380 #define vclsq_m_s16(__inactive, __a, __p) __arm_vclsq_m_s16(__inactive, __a, __p)
1381 #define vaddvaq_p_s16(__a, __b, __p) __arm_vaddvaq_p_s16(__a, __b, __p)
1382 #define vabsq_m_s16(__inactive, __a, __p) __arm_vabsq_m_s16(__inactive, __a, __p)
1383 #define vqrdmlsdhxq_s16(__inactive, __a, __b) __arm_vqrdmlsdhxq_s16(__inactive, __a, __b)
1384 #define vqrdmlsdhq_s16(__inactive, __a, __b) __arm_vqrdmlsdhq_s16(__inactive, __a, __b)
1385 #define vqrdmlashq_n_s16(__a, __b, __c) __arm_vqrdmlashq_n_s16(__a, __b, __c)
1386 #define vqrdmlahq_n_s16(__a, __b, __c) __arm_vqrdmlahq_n_s16(__a, __b, __c)
1387 #define vqrdmladhxq_s16(__inactive, __a, __b) __arm_vqrdmladhxq_s16(__inactive, __a, __b)
1388 #define vqrdmladhq_s16(__inactive, __a, __b) __arm_vqrdmladhq_s16(__inactive, __a, __b)
1389 #define vqdmlsdhxq_s16(__inactive, __a, __b) __arm_vqdmlsdhxq_s16(__inactive, __a, __b)
1390 #define vqdmlsdhq_s16(__inactive, __a, __b) __arm_vqdmlsdhq_s16(__inactive, __a, __b)
1391 #define vqdmlashq_n_s16(__a, __b, __c) __arm_vqdmlashq_n_s16(__a, __b, __c)
1392 #define vqdmlahq_n_s16(__a, __b, __c) __arm_vqdmlahq_n_s16(__a, __b, __c)
1393 #define vqdmladhxq_s16(__inactive, __a, __b) __arm_vqdmladhxq_s16(__inactive, __a, __b)
1394 #define vqdmladhq_s16(__inactive, __a, __b) __arm_vqdmladhq_s16(__inactive, __a, __b)
1395 #define vmlsdavaxq_s16(__a, __b, __c) __arm_vmlsdavaxq_s16(__a, __b, __c)
1396 #define vmlsdavaq_s16(__a, __b, __c) __arm_vmlsdavaq_s16(__a, __b, __c)
1397 #define vmlasq_n_s16(__a, __b, __c) __arm_vmlasq_n_s16(__a, __b, __c)
1398 #define vmlaq_n_s16(__a, __b, __c) __arm_vmlaq_n_s16(__a, __b, __c)
1399 #define vmladavaxq_s16(__a, __b, __c) __arm_vmladavaxq_s16(__a, __b, __c)
1400 #define vmladavaq_s16(__a, __b, __c) __arm_vmladavaq_s16(__a, __b, __c)
1401 #define vsriq_n_s16(__a, __b, __imm) __arm_vsriq_n_s16(__a, __b, __imm)
1402 #define vsliq_n_s16(__a, __b, __imm) __arm_vsliq_n_s16(__a, __b, __imm)
/* Predicated operations on 32-bit element vectors (same shape as the
   8-bit and 16-bit groups above).  */
1403 #define vpselq_u32(__a, __b, __p) __arm_vpselq_u32(__a, __b, __p)
1404 #define vpselq_s32(__a, __b, __p) __arm_vpselq_s32(__a, __b, __p)
1405 #define vrev64q_m_u32(__inactive, __a, __p) __arm_vrev64q_m_u32(__inactive, __a, __p)
1406 #define vmvnq_m_u32(__inactive, __a, __p) __arm_vmvnq_m_u32(__inactive, __a, __p)
1407 #define vmlasq_n_u32(__a, __b, __c) __arm_vmlasq_n_u32(__a, __b, __c)
1408 #define vmlaq_n_u32(__a, __b, __c) __arm_vmlaq_n_u32(__a, __b, __c)
1409 #define vmladavq_p_u32(__a, __b, __p) __arm_vmladavq_p_u32(__a, __b, __p)
1410 #define vmladavaq_u32(__a, __b, __c) __arm_vmladavaq_u32(__a, __b, __c)
1411 #define vminvq_p_u32(__a, __b, __p) __arm_vminvq_p_u32(__a, __b, __p)
1412 #define vmaxvq_p_u32(__a, __b, __p) __arm_vmaxvq_p_u32(__a, __b, __p)
1413 #define vdupq_m_n_u32(__inactive, __a, __p) __arm_vdupq_m_n_u32(__inactive, __a, __p)
1414 #define vcmpneq_m_u32(__a, __b, __p) __arm_vcmpneq_m_u32(__a, __b, __p)
1415 #define vcmpneq_m_n_u32(__a, __b, __p) __arm_vcmpneq_m_n_u32(__a, __b, __p)
1416 #define vcmphiq_m_u32(__a, __b, __p) __arm_vcmphiq_m_u32(__a, __b, __p)
1417 #define vcmphiq_m_n_u32(__a, __b, __p) __arm_vcmphiq_m_n_u32(__a, __b, __p)
1418 #define vcmpeqq_m_u32(__a, __b, __p) __arm_vcmpeqq_m_u32(__a, __b, __p)
1419 #define vcmpeqq_m_n_u32(__a, __b, __p) __arm_vcmpeqq_m_n_u32(__a, __b, __p)
1420 #define vcmpcsq_m_u32(__a, __b, __p) __arm_vcmpcsq_m_u32(__a, __b, __p)
1421 #define vcmpcsq_m_n_u32(__a, __b, __p) __arm_vcmpcsq_m_n_u32(__a, __b, __p)
1422 #define vclzq_m_u32(__inactive, __a, __p) __arm_vclzq_m_u32(__inactive, __a, __p)
1423 #define vaddvaq_p_u32(__a, __b, __p) __arm_vaddvaq_p_u32(__a, __b, __p)
1424 #define vsriq_n_u32(__a, __b, __imm) __arm_vsriq_n_u32(__a, __b, __imm)
1425 #define vsliq_n_u32(__a, __b, __imm) __arm_vsliq_n_u32(__a, __b, __imm)
1426 #define vshlq_m_r_u32(__a, __b, __p) __arm_vshlq_m_r_u32(__a, __b, __p)
1427 #define vrshlq_m_n_u32(__a, __b, __p) __arm_vrshlq_m_n_u32(__a, __b, __p)
1428 #define vqshlq_m_r_u32(__a, __b, __p) __arm_vqshlq_m_r_u32(__a, __b, __p)
1429 #define vqrshlq_m_n_u32(__a, __b, __p) __arm_vqrshlq_m_n_u32(__a, __b, __p)
1430 #define vminavq_p_s32(__a, __b, __p) __arm_vminavq_p_s32(__a, __b, __p)
1431 #define vminaq_m_s32(__a, __b, __p) __arm_vminaq_m_s32(__a, __b, __p)
1432 #define vmaxavq_p_s32(__a, __b, __p) __arm_vmaxavq_p_s32(__a, __b, __p)
1433 #define vmaxaq_m_s32(__a, __b, __p) __arm_vmaxaq_m_s32(__a, __b, __p)
1434 #define vcmpneq_m_s32(__a, __b, __p) __arm_vcmpneq_m_s32(__a, __b, __p)
1435 #define vcmpneq_m_n_s32(__a, __b, __p) __arm_vcmpneq_m_n_s32(__a, __b, __p)
1436 #define vcmpltq_m_s32(__a, __b, __p) __arm_vcmpltq_m_s32(__a, __b, __p)
1437 #define vcmpltq_m_n_s32(__a, __b, __p) __arm_vcmpltq_m_n_s32(__a, __b, __p)
1438 #define vcmpleq_m_s32(__a, __b, __p) __arm_vcmpleq_m_s32(__a, __b, __p)
1439 #define vcmpleq_m_n_s32(__a, __b, __p) __arm_vcmpleq_m_n_s32(__a, __b, __p)
1440 #define vcmpgtq_m_s32(__a, __b, __p) __arm_vcmpgtq_m_s32(__a, __b, __p)
1441 #define vcmpgtq_m_n_s32(__a, __b, __p) __arm_vcmpgtq_m_n_s32(__a, __b, __p)
1442 #define vcmpgeq_m_s32(__a, __b, __p) __arm_vcmpgeq_m_s32(__a, __b, __p)
1443 #define vcmpgeq_m_n_s32(__a, __b, __p) __arm_vcmpgeq_m_n_s32(__a, __b, __p)
1444 #define vcmpeqq_m_s32(__a, __b, __p) __arm_vcmpeqq_m_s32(__a, __b, __p)
1445 #define vcmpeqq_m_n_s32(__a, __b, __p) __arm_vcmpeqq_m_n_s32(__a, __b, __p)
1446 #define vshlq_m_r_s32(__a, __b, __p) __arm_vshlq_m_r_s32(__a, __b, __p)
1447 #define vrshlq_m_n_s32(__a, __b, __p) __arm_vrshlq_m_n_s32(__a, __b, __p)
1448 #define vrev64q_m_s32(__inactive, __a, __p) __arm_vrev64q_m_s32(__inactive, __a, __p)
1449 #define vqshlq_m_r_s32(__a, __b, __p) __arm_vqshlq_m_r_s32(__a, __b, __p)
1450 #define vqrshlq_m_n_s32(__a, __b, __p) __arm_vqrshlq_m_n_s32(__a, __b, __p)
1451 #define vqnegq_m_s32(__inactive, __a, __p) __arm_vqnegq_m_s32(__inactive, __a, __p)
1452 #define vqabsq_m_s32(__inactive, __a, __p) __arm_vqabsq_m_s32(__inactive, __a, __p)
1453 #define vnegq_m_s32(__inactive, __a, __p) __arm_vnegq_m_s32(__inactive, __a, __p)
1454 #define vmvnq_m_s32(__inactive, __a, __p) __arm_vmvnq_m_s32(__inactive, __a, __p)
1455 #define vmlsdavxq_p_s32(__a, __b, __p) __arm_vmlsdavxq_p_s32(__a, __b, __p)
1456 #define vmlsdavq_p_s32(__a, __b, __p) __arm_vmlsdavq_p_s32(__a, __b, __p)
1457 #define vmladavxq_p_s32(__a, __b, __p) __arm_vmladavxq_p_s32(__a, __b, __p)
1458 #define vmladavq_p_s32(__a, __b, __p) __arm_vmladavq_p_s32(__a, __b, __p)
1459 #define vminvq_p_s32(__a, __b, __p) __arm_vminvq_p_s32(__a, __b, __p)
1460 #define vmaxvq_p_s32(__a, __b, __p) __arm_vmaxvq_p_s32(__a, __b, __p)
1461 #define vdupq_m_n_s32(__inactive, __a, __p) __arm_vdupq_m_n_s32(__inactive, __a, __p)
1462 #define vclzq_m_s32(__inactive, __a, __p) __arm_vclzq_m_s32(__inactive, __a, __p)
1463 #define vclsq_m_s32(__inactive, __a, __p) __arm_vclsq_m_s32(__inactive, __a, __p)
1464 #define vaddvaq_p_s32(__a, __b, __p) __arm_vaddvaq_p_s32(__a, __b, __p)
1465 #define vabsq_m_s32(__inactive, __a, __p) __arm_vabsq_m_s32(__inactive, __a, __p)
1466 #define vqrdmlsdhxq_s32(__inactive, __a, __b) __arm_vqrdmlsdhxq_s32(__inactive, __a, __b)
1467 #define vqrdmlsdhq_s32(__inactive, __a, __b) __arm_vqrdmlsdhq_s32(__inactive, __a, __b)
1468 #define vqrdmlashq_n_s32(__a, __b, __c) __arm_vqrdmlashq_n_s32(__a, __b, __c)
1469 #define vqrdmlahq_n_s32(__a, __b, __c) __arm_vqrdmlahq_n_s32(__a, __b, __c)
1470 #define vqrdmladhxq_s32(__inactive, __a, __b) __arm_vqrdmladhxq_s32(__inactive, __a, __b)
1471 #define vqrdmladhq_s32(__inactive, __a, __b) __arm_vqrdmladhq_s32(__inactive, __a, __b)
1472 #define vqdmlsdhxq_s32(__inactive, __a, __b) __arm_vqdmlsdhxq_s32(__inactive, __a, __b)
1473 #define vqdmlsdhq_s32(__inactive, __a, __b) __arm_vqdmlsdhq_s32(__inactive, __a, __b)
1474 #define vqdmlashq_n_s32(__a, __b, __c) __arm_vqdmlashq_n_s32(__a, __b, __c)
1475 #define vqdmlahq_n_s32(__a, __b, __c) __arm_vqdmlahq_n_s32(__a, __b, __c)
1476 #define vqdmladhxq_s32(__inactive, __a, __b) __arm_vqdmladhxq_s32(__inactive, __a, __b)
1477 #define vqdmladhq_s32(__inactive, __a, __b) __arm_vqdmladhq_s32(__inactive, __a, __b)
1478 #define vmlsdavaxq_s32(__a, __b, __c) __arm_vmlsdavaxq_s32(__a, __b, __c)
1479 #define vmlsdavaq_s32(__a, __b, __c) __arm_vmlsdavaq_s32(__a, __b, __c)
1480 #define vmlasq_n_s32(__a, __b, __c) __arm_vmlasq_n_s32(__a, __b, __c)
1481 #define vmlaq_n_s32(__a, __b, __c) __arm_vmlaq_n_s32(__a, __b, __c)
1482 #define vmladavaxq_s32(__a, __b, __c) __arm_vmladavaxq_s32(__a, __b, __c)
1483 #define vmladavaq_s32(__a, __b, __c) __arm_vmladavaq_s32(__a, __b, __c)
1484 #define vsriq_n_s32(__a, __b, __imm) __arm_vsriq_n_s32(__a, __b, __imm)
1485 #define vsliq_n_s32(__a, __b, __imm) __arm_vsliq_n_s32(__a, __b, __imm)
1486 #define vpselq_u64(__a, __b, __p) __arm_vpselq_u64(__a, __b, __p)
1487 #define vpselq_s64(__a, __b, __p) __arm_vpselq_s64(__a, __b, __p)
1488 #define vrmlaldavhaxq_s32(__a, __b, __c) __arm_vrmlaldavhaxq_s32(__a, __b, __c)
1489 #define vrmlsldavhaq_s32(__a, __b, __c) __arm_vrmlsldavhaq_s32(__a, __b, __c)
1490 #define vrmlsldavhaxq_s32(__a, __b, __c) __arm_vrmlsldavhaxq_s32(__a, __b, __c)
1491 #define vaddlvaq_p_s32(__a, __b, __p) __arm_vaddlvaq_p_s32(__a, __b, __p)
1492 #define vcvtbq_m_f16_f32(__a, __b, __p) __arm_vcvtbq_m_f16_f32(__a, __b, __p)
1493 #define vcvtbq_m_f32_f16(__inactive, __a, __p) __arm_vcvtbq_m_f32_f16(__inactive, __a, __p)
1494 #define vcvttq_m_f16_f32(__a, __b, __p) __arm_vcvttq_m_f16_f32(__a, __b, __p)
1495 #define vcvttq_m_f32_f16(__inactive, __a, __p) __arm_vcvttq_m_f32_f16(__inactive, __a, __p)
1496 #define vrev16q_m_s8(__inactive, __a, __p) __arm_vrev16q_m_s8(__inactive, __a, __p)
1497 #define vrev32q_m_f16(__inactive, __a, __p) __arm_vrev32q_m_f16(__inactive, __a, __p)
1498 #define vrmlaldavhq_p_s32(__a, __b, __p) __arm_vrmlaldavhq_p_s32(__a, __b, __p)
1499 #define vrmlaldavhxq_p_s32(__a, __b, __p) __arm_vrmlaldavhxq_p_s32(__a, __b, __p)
1500 #define vrmlsldavhq_p_s32(__a, __b, __p) __arm_vrmlsldavhq_p_s32(__a, __b, __p)
1501 #define vrmlsldavhxq_p_s32(__a, __b, __p) __arm_vrmlsldavhxq_p_s32(__a, __b, __p)
1502 #define vaddlvaq_p_u32(__a, __b, __p) __arm_vaddlvaq_p_u32(__a, __b, __p)
1503 #define vrev16q_m_u8(__inactive, __a, __p) __arm_vrev16q_m_u8(__inactive, __a, __p)
1504 #define vrmlaldavhq_p_u32(__a, __b, __p) __arm_vrmlaldavhq_p_u32(__a, __b, __p)
1505 #define vmvnq_m_n_s16(__inactive, __imm, __p) __arm_vmvnq_m_n_s16(__inactive, __imm, __p)
1506 #define vorrq_m_n_s16(__a, __imm, __p) __arm_vorrq_m_n_s16(__a, __imm, __p)
1507 #define vqrshrntq_n_s16(__a, __b, __imm) __arm_vqrshrntq_n_s16(__a, __b, __imm)
1508 #define vqshrnbq_n_s16(__a, __b, __imm) __arm_vqshrnbq_n_s16(__a, __b, __imm)
1509 #define vqshrntq_n_s16(__a, __b, __imm) __arm_vqshrntq_n_s16(__a, __b, __imm)
1510 #define vrshrnbq_n_s16(__a, __b, __imm) __arm_vrshrnbq_n_s16(__a, __b, __imm)
1511 #define vrshrntq_n_s16(__a, __b, __imm) __arm_vrshrntq_n_s16(__a, __b, __imm)
1512 #define vshrnbq_n_s16(__a, __b, __imm) __arm_vshrnbq_n_s16(__a, __b, __imm)
1513 #define vshrntq_n_s16(__a, __b, __imm) __arm_vshrntq_n_s16(__a, __b, __imm)
1514 #define vcmlaq_f16(__a, __b, __c) __arm_vcmlaq_f16(__a, __b, __c)
1515 #define vcmlaq_rot180_f16(__a, __b, __c) __arm_vcmlaq_rot180_f16(__a, __b, __c)
1516 #define vcmlaq_rot270_f16(__a, __b, __c) __arm_vcmlaq_rot270_f16(__a, __b, __c)
1517 #define vcmlaq_rot90_f16(__a, __b, __c) __arm_vcmlaq_rot90_f16(__a, __b, __c)
1518 #define vfmaq_f16(__a, __b, __c) __arm_vfmaq_f16(__a, __b, __c)
1519 #define vfmaq_n_f16(__a, __b, __c) __arm_vfmaq_n_f16(__a, __b, __c)
1520 #define vfmasq_n_f16(__a, __b, __c) __arm_vfmasq_n_f16(__a, __b, __c)
1521 #define vfmsq_f16(__a, __b, __c) __arm_vfmsq_f16(__a, __b, __c)
1522 #define vmlaldavaq_s16(__a, __b, __c) __arm_vmlaldavaq_s16(__a, __b, __c)
1523 #define vmlaldavaxq_s16(__a, __b, __c) __arm_vmlaldavaxq_s16(__a, __b, __c)
1524 #define vmlsldavaq_s16(__a, __b, __c) __arm_vmlsldavaq_s16(__a, __b, __c)
1525 #define vmlsldavaxq_s16(__a, __b, __c) __arm_vmlsldavaxq_s16(__a, __b, __c)
1526 #define vabsq_m_f16(__inactive, __a, __p) __arm_vabsq_m_f16(__inactive, __a, __p)
1527 #define vcvtmq_m_s16_f16(__inactive, __a, __p) __arm_vcvtmq_m_s16_f16(__inactive, __a, __p)
1528 #define vcvtnq_m_s16_f16(__inactive, __a, __p) __arm_vcvtnq_m_s16_f16(__inactive, __a, __p)
1529 #define vcvtpq_m_s16_f16(__inactive, __a, __p) __arm_vcvtpq_m_s16_f16(__inactive, __a, __p)
1530 #define vcvtq_m_s16_f16(__inactive, __a, __p) __arm_vcvtq_m_s16_f16(__inactive, __a, __p)
1531 #define vdupq_m_n_f16(__inactive, __a, __p) __arm_vdupq_m_n_f16(__inactive, __a, __p)
1532 #define vmaxnmaq_m_f16(__a, __b, __p) __arm_vmaxnmaq_m_f16(__a, __b, __p)
1533 #define vmaxnmavq_p_f16(__a, __b, __p) __arm_vmaxnmavq_p_f16(__a, __b, __p)
1534 #define vmaxnmvq_p_f16(__a, __b, __p) __arm_vmaxnmvq_p_f16(__a, __b, __p)
1535 #define vminnmaq_m_f16(__a, __b, __p) __arm_vminnmaq_m_f16(__a, __b, __p)
1536 #define vminnmavq_p_f16(__a, __b, __p) __arm_vminnmavq_p_f16(__a, __b, __p)
1537 #define vminnmvq_p_f16(__a, __b, __p) __arm_vminnmvq_p_f16(__a, __b, __p)
1538 #define vmlaldavq_p_s16(__a, __b, __p) __arm_vmlaldavq_p_s16(__a, __b, __p)
1539 #define vmlaldavxq_p_s16(__a, __b, __p) __arm_vmlaldavxq_p_s16(__a, __b, __p)
1540 #define vmlsldavq_p_s16(__a, __b, __p) __arm_vmlsldavq_p_s16(__a, __b, __p)
1541 #define vmlsldavxq_p_s16(__a, __b, __p) __arm_vmlsldavxq_p_s16(__a, __b, __p)
1542 #define vmovlbq_m_s8(__inactive, __a, __p) __arm_vmovlbq_m_s8(__inactive, __a, __p)
1543 #define vmovltq_m_s8(__inactive, __a, __p) __arm_vmovltq_m_s8(__inactive, __a, __p)
1544 #define vmovnbq_m_s16(__a, __b, __p) __arm_vmovnbq_m_s16(__a, __b, __p)
1545 #define vmovntq_m_s16(__a, __b, __p) __arm_vmovntq_m_s16(__a, __b, __p)
1546 #define vnegq_m_f16(__inactive, __a, __p) __arm_vnegq_m_f16(__inactive, __a, __p)
1547 #define vpselq_f16(__a, __b, __p) __arm_vpselq_f16(__a, __b, __p)
1548 #define vqmovnbq_m_s16(__a, __b, __p) __arm_vqmovnbq_m_s16(__a, __b, __p)
1549 #define vqmovntq_m_s16(__a, __b, __p) __arm_vqmovntq_m_s16(__a, __b, __p)
1550 #define vrev32q_m_s8(__inactive, __a, __p) __arm_vrev32q_m_s8(__inactive, __a, __p)
1551 #define vrev64q_m_f16(__inactive, __a, __p) __arm_vrev64q_m_f16(__inactive, __a, __p)
1552 #define vrndaq_m_f16(__inactive, __a, __p) __arm_vrndaq_m_f16(__inactive, __a, __p)
1553 #define vrndmq_m_f16(__inactive, __a, __p) __arm_vrndmq_m_f16(__inactive, __a, __p)
1554 #define vrndnq_m_f16(__inactive, __a, __p) __arm_vrndnq_m_f16(__inactive, __a, __p)
1555 #define vrndpq_m_f16(__inactive, __a, __p) __arm_vrndpq_m_f16(__inactive, __a, __p)
1556 #define vrndq_m_f16(__inactive, __a, __p) __arm_vrndq_m_f16(__inactive, __a, __p)
1557 #define vrndxq_m_f16(__inactive, __a, __p) __arm_vrndxq_m_f16(__inactive, __a, __p)
1558 #define vcmpeqq_m_n_f16(__a, __b, __p) __arm_vcmpeqq_m_n_f16(__a, __b, __p)
1559 #define vcmpgeq_m_f16(__a, __b, __p) __arm_vcmpgeq_m_f16(__a, __b, __p)
1560 #define vcmpgeq_m_n_f16(__a, __b, __p) __arm_vcmpgeq_m_n_f16(__a, __b, __p)
1561 #define vcmpgtq_m_f16(__a, __b, __p) __arm_vcmpgtq_m_f16(__a, __b, __p)
1562 #define vcmpgtq_m_n_f16(__a, __b, __p) __arm_vcmpgtq_m_n_f16(__a, __b, __p)
1563 #define vcmpleq_m_f16(__a, __b, __p) __arm_vcmpleq_m_f16(__a, __b, __p)
1564 #define vcmpleq_m_n_f16(__a, __b, __p) __arm_vcmpleq_m_n_f16(__a, __b, __p)
1565 #define vcmpltq_m_f16(__a, __b, __p) __arm_vcmpltq_m_f16(__a, __b, __p)
1566 #define vcmpltq_m_n_f16(__a, __b, __p) __arm_vcmpltq_m_n_f16(__a, __b, __p)
1567 #define vcmpneq_m_f16(__a, __b, __p) __arm_vcmpneq_m_f16(__a, __b, __p)
1568 #define vcmpneq_m_n_f16(__a, __b, __p) __arm_vcmpneq_m_n_f16(__a, __b, __p)
1569 #define vmvnq_m_n_u16(__inactive, __imm, __p) __arm_vmvnq_m_n_u16(__inactive, __imm, __p)
1570 #define vorrq_m_n_u16(__a, __imm, __p) __arm_vorrq_m_n_u16(__a, __imm, __p)
1571 #define vqrshruntq_n_s16(__a, __b, __imm) __arm_vqrshruntq_n_s16(__a, __b, __imm)
1572 #define vqshrunbq_n_s16(__a, __b, __imm) __arm_vqshrunbq_n_s16(__a, __b, __imm)
1573 #define vqshruntq_n_s16(__a, __b, __imm) __arm_vqshruntq_n_s16(__a, __b, __imm)
1574 #define vcvtmq_m_u16_f16(__inactive, __a, __p) __arm_vcvtmq_m_u16_f16(__inactive, __a, __p)
1575 #define vcvtnq_m_u16_f16(__inactive, __a, __p) __arm_vcvtnq_m_u16_f16(__inactive, __a, __p)
1576 #define vcvtpq_m_u16_f16(__inactive, __a, __p) __arm_vcvtpq_m_u16_f16(__inactive, __a, __p)
1577 #define vcvtq_m_u16_f16(__inactive, __a, __p) __arm_vcvtq_m_u16_f16(__inactive, __a, __p)
1578 #define vqmovunbq_m_s16(__a, __b, __p) __arm_vqmovunbq_m_s16(__a, __b, __p)
1579 #define vqmovuntq_m_s16(__a, __b, __p) __arm_vqmovuntq_m_s16(__a, __b, __p)
1580 #define vqrshrntq_n_u16(__a, __b, __imm) __arm_vqrshrntq_n_u16(__a, __b, __imm)
1581 #define vqshrnbq_n_u16(__a, __b, __imm) __arm_vqshrnbq_n_u16(__a, __b, __imm)
1582 #define vqshrntq_n_u16(__a, __b, __imm) __arm_vqshrntq_n_u16(__a, __b, __imm)
1583 #define vrshrnbq_n_u16(__a, __b, __imm) __arm_vrshrnbq_n_u16(__a, __b, __imm)
1584 #define vrshrntq_n_u16(__a, __b, __imm) __arm_vrshrntq_n_u16(__a, __b, __imm)
1585 #define vshrnbq_n_u16(__a, __b, __imm) __arm_vshrnbq_n_u16(__a, __b, __imm)
1586 #define vshrntq_n_u16(__a, __b, __imm) __arm_vshrntq_n_u16(__a, __b, __imm)
1587 #define vmlaldavaq_u16(__a, __b, __c) __arm_vmlaldavaq_u16(__a, __b, __c)
1588 #define vmlaldavq_p_u16(__a, __b, __p) __arm_vmlaldavq_p_u16(__a, __b, __p)
1589 #define vmovlbq_m_u8(__inactive, __a, __p) __arm_vmovlbq_m_u8(__inactive, __a, __p)
1590 #define vmovltq_m_u8(__inactive, __a, __p) __arm_vmovltq_m_u8(__inactive, __a, __p)
1591 #define vmovnbq_m_u16(__a, __b, __p) __arm_vmovnbq_m_u16(__a, __b, __p)
1592 #define vmovntq_m_u16(__a, __b, __p) __arm_vmovntq_m_u16(__a, __b, __p)
1593 #define vqmovnbq_m_u16(__a, __b, __p) __arm_vqmovnbq_m_u16(__a, __b, __p)
1594 #define vqmovntq_m_u16(__a, __b, __p) __arm_vqmovntq_m_u16(__a, __b, __p)
1595 #define vrev32q_m_u8(__inactive, __a, __p) __arm_vrev32q_m_u8(__inactive, __a, __p)
1596 #define vmvnq_m_n_s32(__inactive, __imm, __p) __arm_vmvnq_m_n_s32(__inactive, __imm, __p)
1597 #define vorrq_m_n_s32(__a, __imm, __p) __arm_vorrq_m_n_s32(__a, __imm, __p)
1598 #define vqrshrntq_n_s32(__a, __b, __imm) __arm_vqrshrntq_n_s32(__a, __b, __imm)
1599 #define vqshrnbq_n_s32(__a, __b, __imm) __arm_vqshrnbq_n_s32(__a, __b, __imm)
1600 #define vqshrntq_n_s32(__a, __b, __imm) __arm_vqshrntq_n_s32(__a, __b, __imm)
1601 #define vrshrnbq_n_s32(__a, __b, __imm) __arm_vrshrnbq_n_s32(__a, __b, __imm)
1602 #define vrshrntq_n_s32(__a, __b, __imm) __arm_vrshrntq_n_s32(__a, __b, __imm)
1603 #define vshrnbq_n_s32(__a, __b, __imm) __arm_vshrnbq_n_s32(__a, __b, __imm)
1604 #define vshrntq_n_s32(__a, __b, __imm) __arm_vshrntq_n_s32(__a, __b, __imm)
1605 #define vcmlaq_f32(__a, __b, __c) __arm_vcmlaq_f32(__a, __b, __c)
1606 #define vcmlaq_rot180_f32(__a, __b, __c) __arm_vcmlaq_rot180_f32(__a, __b, __c)
1607 #define vcmlaq_rot270_f32(__a, __b, __c) __arm_vcmlaq_rot270_f32(__a, __b, __c)
1608 #define vcmlaq_rot90_f32(__a, __b, __c) __arm_vcmlaq_rot90_f32(__a, __b, __c)
1609 #define vfmaq_f32(__a, __b, __c) __arm_vfmaq_f32(__a, __b, __c)
1610 #define vfmaq_n_f32(__a, __b, __c) __arm_vfmaq_n_f32(__a, __b, __c)
1611 #define vfmasq_n_f32(__a, __b, __c) __arm_vfmasq_n_f32(__a, __b, __c)
1612 #define vfmsq_f32(__a, __b, __c) __arm_vfmsq_f32(__a, __b, __c)
1613 #define vmlaldavaq_s32(__a, __b, __c) __arm_vmlaldavaq_s32(__a, __b, __c)
1614 #define vmlaldavaxq_s32(__a, __b, __c) __arm_vmlaldavaxq_s32(__a, __b, __c)
1615 #define vmlsldavaq_s32(__a, __b, __c) __arm_vmlsldavaq_s32(__a, __b, __c)
1616 #define vmlsldavaxq_s32(__a, __b, __c) __arm_vmlsldavaxq_s32(__a, __b, __c)
1617 #define vabsq_m_f32(__inactive, __a, __p) __arm_vabsq_m_f32(__inactive, __a, __p)
1618 #define vcvtmq_m_s32_f32(__inactive, __a, __p) __arm_vcvtmq_m_s32_f32(__inactive, __a, __p)
1619 #define vcvtnq_m_s32_f32(__inactive, __a, __p) __arm_vcvtnq_m_s32_f32(__inactive, __a, __p)
1620 #define vcvtpq_m_s32_f32(__inactive, __a, __p) __arm_vcvtpq_m_s32_f32(__inactive, __a, __p)
1621 #define vcvtq_m_s32_f32(__inactive, __a, __p) __arm_vcvtq_m_s32_f32(__inactive, __a, __p)
1622 #define vdupq_m_n_f32(__inactive, __a, __p) __arm_vdupq_m_n_f32(__inactive, __a, __p)
1623 #define vmaxnmaq_m_f32(__a, __b, __p) __arm_vmaxnmaq_m_f32(__a, __b, __p)
1624 #define vmaxnmavq_p_f32(__a, __b, __p) __arm_vmaxnmavq_p_f32(__a, __b, __p)
1625 #define vmaxnmvq_p_f32(__a, __b, __p) __arm_vmaxnmvq_p_f32(__a, __b, __p)
1626 #define vminnmaq_m_f32(__a, __b, __p) __arm_vminnmaq_m_f32(__a, __b, __p)
1627 #define vminnmavq_p_f32(__a, __b, __p) __arm_vminnmavq_p_f32(__a, __b, __p)
1628 #define vminnmvq_p_f32(__a, __b, __p) __arm_vminnmvq_p_f32(__a, __b, __p)
1629 #define vmlaldavq_p_s32(__a, __b, __p) __arm_vmlaldavq_p_s32(__a, __b, __p)
1630 #define vmlaldavxq_p_s32(__a, __b, __p) __arm_vmlaldavxq_p_s32(__a, __b, __p)
1631 #define vmlsldavq_p_s32(__a, __b, __p) __arm_vmlsldavq_p_s32(__a, __b, __p)
1632 #define vmlsldavxq_p_s32(__a, __b, __p) __arm_vmlsldavxq_p_s32(__a, __b, __p)
1633 #define vmovlbq_m_s16(__inactive, __a, __p) __arm_vmovlbq_m_s16(__inactive, __a, __p)
1634 #define vmovltq_m_s16(__inactive, __a, __p) __arm_vmovltq_m_s16(__inactive, __a, __p)
1635 #define vmovnbq_m_s32(__a, __b, __p) __arm_vmovnbq_m_s32(__a, __b, __p)
1636 #define vmovntq_m_s32(__a, __b, __p) __arm_vmovntq_m_s32(__a, __b, __p)
1637 #define vnegq_m_f32(__inactive, __a, __p) __arm_vnegq_m_f32(__inactive, __a, __p)
1638 #define vpselq_f32(__a, __b, __p) __arm_vpselq_f32(__a, __b, __p)
1639 #define vqmovnbq_m_s32(__a, __b, __p) __arm_vqmovnbq_m_s32(__a, __b, __p)
1640 #define vqmovntq_m_s32(__a, __b, __p) __arm_vqmovntq_m_s32(__a, __b, __p)
1641 #define vrev32q_m_s16(__inactive, __a, __p) __arm_vrev32q_m_s16(__inactive, __a, __p)
1642 #define vrev64q_m_f32(__inactive, __a, __p) __arm_vrev64q_m_f32(__inactive, __a, __p)
1643 #define vrndaq_m_f32(__inactive, __a, __p) __arm_vrndaq_m_f32(__inactive, __a, __p)
1644 #define vrndmq_m_f32(__inactive, __a, __p) __arm_vrndmq_m_f32(__inactive, __a, __p)
1645 #define vrndnq_m_f32(__inactive, __a, __p) __arm_vrndnq_m_f32(__inactive, __a, __p)
1646 #define vrndpq_m_f32(__inactive, __a, __p) __arm_vrndpq_m_f32(__inactive, __a, __p)
1647 #define vrndq_m_f32(__inactive, __a, __p) __arm_vrndq_m_f32(__inactive, __a, __p)
1648 #define vrndxq_m_f32(__inactive, __a, __p) __arm_vrndxq_m_f32(__inactive, __a, __p)
1649 #define vcmpeqq_m_n_f32(__a, __b, __p) __arm_vcmpeqq_m_n_f32(__a, __b, __p)
1650 #define vcmpgeq_m_f32(__a, __b, __p) __arm_vcmpgeq_m_f32(__a, __b, __p)
1651 #define vcmpgeq_m_n_f32(__a, __b, __p) __arm_vcmpgeq_m_n_f32(__a, __b, __p)
1652 #define vcmpgtq_m_f32(__a, __b, __p) __arm_vcmpgtq_m_f32(__a, __b, __p)
1653 #define vcmpgtq_m_n_f32(__a, __b, __p) __arm_vcmpgtq_m_n_f32(__a, __b, __p)
1654 #define vcmpleq_m_f32(__a, __b, __p) __arm_vcmpleq_m_f32(__a, __b, __p)
1655 #define vcmpleq_m_n_f32(__a, __b, __p) __arm_vcmpleq_m_n_f32(__a, __b, __p)
1656 #define vcmpltq_m_f32(__a, __b, __p) __arm_vcmpltq_m_f32(__a, __b, __p)
1657 #define vcmpltq_m_n_f32(__a, __b, __p) __arm_vcmpltq_m_n_f32(__a, __b, __p)
1658 #define vcmpneq_m_f32(__a, __b, __p) __arm_vcmpneq_m_f32(__a, __b, __p)
1659 #define vcmpneq_m_n_f32(__a, __b, __p) __arm_vcmpneq_m_n_f32(__a, __b, __p)
1660 #define vmvnq_m_n_u32(__inactive, __imm, __p) __arm_vmvnq_m_n_u32(__inactive, __imm, __p)
1661 #define vorrq_m_n_u32(__a, __imm, __p) __arm_vorrq_m_n_u32(__a, __imm, __p)
1662 #define vqrshruntq_n_s32(__a, __b, __imm) __arm_vqrshruntq_n_s32(__a, __b, __imm)
1663 #define vqshrunbq_n_s32(__a, __b, __imm) __arm_vqshrunbq_n_s32(__a, __b, __imm)
1664 #define vqshruntq_n_s32(__a, __b, __imm) __arm_vqshruntq_n_s32(__a, __b, __imm)
1665 #define vcvtmq_m_u32_f32(__inactive, __a, __p) __arm_vcvtmq_m_u32_f32(__inactive, __a, __p)
1666 #define vcvtnq_m_u32_f32(__inactive, __a, __p) __arm_vcvtnq_m_u32_f32(__inactive, __a, __p)
1667 #define vcvtpq_m_u32_f32(__inactive, __a, __p) __arm_vcvtpq_m_u32_f32(__inactive, __a, __p)
1668 #define vcvtq_m_u32_f32(__inactive, __a, __p) __arm_vcvtq_m_u32_f32(__inactive, __a, __p)
1669 #define vqmovunbq_m_s32(__a, __b, __p) __arm_vqmovunbq_m_s32(__a, __b, __p)
1670 #define vqmovuntq_m_s32(__a, __b, __p) __arm_vqmovuntq_m_s32(__a, __b, __p)
1671 #define vqrshrntq_n_u32(__a, __b, __imm) __arm_vqrshrntq_n_u32(__a, __b, __imm)
1672 #define vqshrnbq_n_u32(__a, __b, __imm) __arm_vqshrnbq_n_u32(__a, __b, __imm)
1673 #define vqshrntq_n_u32(__a, __b, __imm) __arm_vqshrntq_n_u32(__a, __b, __imm)
1674 #define vrshrnbq_n_u32(__a, __b, __imm) __arm_vrshrnbq_n_u32(__a, __b, __imm)
1675 #define vrshrntq_n_u32(__a, __b, __imm) __arm_vrshrntq_n_u32(__a, __b, __imm)
1676 #define vshrnbq_n_u32(__a, __b, __imm) __arm_vshrnbq_n_u32(__a, __b, __imm)
1677 #define vshrntq_n_u32(__a, __b, __imm) __arm_vshrntq_n_u32(__a, __b, __imm)
1678 #define vmlaldavaq_u32(__a, __b, __c) __arm_vmlaldavaq_u32(__a, __b, __c)
1679 #define vmlaldavq_p_u32(__a, __b, __p) __arm_vmlaldavq_p_u32(__a, __b, __p)
1680 #define vmovlbq_m_u16(__inactive, __a, __p) __arm_vmovlbq_m_u16(__inactive, __a, __p)
1681 #define vmovltq_m_u16(__inactive, __a, __p) __arm_vmovltq_m_u16(__inactive, __a, __p)
1682 #define vmovnbq_m_u32(__a, __b, __p) __arm_vmovnbq_m_u32(__a, __b, __p)
1683 #define vmovntq_m_u32(__a, __b, __p) __arm_vmovntq_m_u32(__a, __b, __p)
1684 #define vqmovnbq_m_u32(__a, __b, __p) __arm_vqmovnbq_m_u32(__a, __b, __p)
1685 #define vqmovntq_m_u32(__a, __b, __p) __arm_vqmovntq_m_u32(__a, __b, __p)
1686 #define vrev32q_m_u16(__inactive, __a, __p) __arm_vrev32q_m_u16(__inactive, __a, __p)
1687 #define vsriq_m_n_s8(__a, __b, __imm, __p) __arm_vsriq_m_n_s8(__a, __b, __imm, __p)
1688 #define vsubq_m_s8(__inactive, __a, __b, __p) __arm_vsubq_m_s8(__inactive, __a, __b, __p)
1689 #define vcvtq_m_n_f16_u16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f16_u16(__inactive, __a, __imm6, __p)
1690 #define vqshluq_m_n_s8(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s8(__inactive, __a, __imm, __p)
1691 #define vabavq_p_s8(__a, __b, __c, __p) __arm_vabavq_p_s8(__a, __b, __c, __p)
1692 #define vsriq_m_n_u8(__a, __b, __imm, __p) __arm_vsriq_m_n_u8(__a, __b, __imm, __p)
1693 #define vshlq_m_u8(__inactive, __a, __b, __p) __arm_vshlq_m_u8(__inactive, __a, __b, __p)
1694 #define vsubq_m_u8(__inactive, __a, __b, __p) __arm_vsubq_m_u8(__inactive, __a, __b, __p)
1695 #define vabavq_p_u8(__a, __b, __c, __p) __arm_vabavq_p_u8(__a, __b, __c, __p)
1696 #define vshlq_m_s8(__inactive, __a, __b, __p) __arm_vshlq_m_s8(__inactive, __a, __b, __p)
1697 #define vcvtq_m_n_f16_s16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f16_s16(__inactive, __a, __imm6, __p)
1698 #define vsriq_m_n_s16(__a, __b, __imm, __p) __arm_vsriq_m_n_s16(__a, __b, __imm, __p)
1699 #define vsubq_m_s16(__inactive, __a, __b, __p) __arm_vsubq_m_s16(__inactive, __a, __b, __p)
1700 #define vcvtq_m_n_f32_u32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f32_u32(__inactive, __a, __imm6, __p)
1701 #define vqshluq_m_n_s16(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s16(__inactive, __a, __imm, __p)
1702 #define vabavq_p_s16(__a, __b, __c, __p) __arm_vabavq_p_s16(__a, __b, __c, __p)
1703 #define vsriq_m_n_u16(__a, __b, __imm, __p) __arm_vsriq_m_n_u16(__a, __b, __imm, __p)
1704 #define vshlq_m_u16(__inactive, __a, __b, __p) __arm_vshlq_m_u16(__inactive, __a, __b, __p)
1705 #define vsubq_m_u16(__inactive, __a, __b, __p) __arm_vsubq_m_u16(__inactive, __a, __b, __p)
1706 #define vabavq_p_u16(__a, __b, __c, __p) __arm_vabavq_p_u16(__a, __b, __c, __p)
1707 #define vshlq_m_s16(__inactive, __a, __b, __p) __arm_vshlq_m_s16(__inactive, __a, __b, __p)
1708 #define vcvtq_m_n_f32_s32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f32_s32(__inactive, __a, __imm6, __p)
1709 #define vsriq_m_n_s32(__a, __b, __imm, __p) __arm_vsriq_m_n_s32(__a, __b, __imm, __p)
1710 #define vsubq_m_s32(__inactive, __a, __b, __p) __arm_vsubq_m_s32(__inactive, __a, __b, __p)
1711 #define vqshluq_m_n_s32(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s32(__inactive, __a, __imm, __p)
1712 #define vabavq_p_s32(__a, __b, __c, __p) __arm_vabavq_p_s32(__a, __b, __c, __p)
1713 #define vsriq_m_n_u32(__a, __b, __imm, __p) __arm_vsriq_m_n_u32(__a, __b, __imm, __p)
1714 #define vshlq_m_u32(__inactive, __a, __b, __p) __arm_vshlq_m_u32(__inactive, __a, __b, __p)
1715 #define vsubq_m_u32(__inactive, __a, __b, __p) __arm_vsubq_m_u32(__inactive, __a, __b, __p)
1716 #define vabavq_p_u32(__a, __b, __c, __p) __arm_vabavq_p_u32(__a, __b, __c, __p)
1717 #define vshlq_m_s32(__inactive, __a, __b, __p) __arm_vshlq_m_s32(__inactive, __a, __b, __p)
1718 #define vabdq_m_s8(__inactive, __a, __b, __p) __arm_vabdq_m_s8(__inactive, __a, __b, __p)
1719 #define vabdq_m_s32(__inactive, __a, __b, __p) __arm_vabdq_m_s32(__inactive, __a, __b, __p)
1720 #define vabdq_m_s16(__inactive, __a, __b, __p) __arm_vabdq_m_s16(__inactive, __a, __b, __p)
1721 #define vabdq_m_u8(__inactive, __a, __b, __p) __arm_vabdq_m_u8(__inactive, __a, __b, __p)
1722 #define vabdq_m_u32(__inactive, __a, __b, __p) __arm_vabdq_m_u32(__inactive, __a, __b, __p)
1723 #define vabdq_m_u16(__inactive, __a, __b, __p) __arm_vabdq_m_u16(__inactive, __a, __b, __p)
1724 #define vaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vaddq_m_n_s8(__inactive, __a, __b, __p)
1725 #define vaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vaddq_m_n_s32(__inactive, __a, __b, __p)
1726 #define vaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vaddq_m_n_s16(__inactive, __a, __b, __p)
1727 #define vaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vaddq_m_n_u8(__inactive, __a, __b, __p)
1728 #define vaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vaddq_m_n_u32(__inactive, __a, __b, __p)
1729 #define vaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vaddq_m_n_u16(__inactive, __a, __b, __p)
1730 #define vaddq_m_s8(__inactive, __a, __b, __p) __arm_vaddq_m_s8(__inactive, __a, __b, __p)
1731 #define vaddq_m_s32(__inactive, __a, __b, __p) __arm_vaddq_m_s32(__inactive, __a, __b, __p)
1732 #define vaddq_m_s16(__inactive, __a, __b, __p) __arm_vaddq_m_s16(__inactive, __a, __b, __p)
1733 #define vaddq_m_u8(__inactive, __a, __b, __p) __arm_vaddq_m_u8(__inactive, __a, __b, __p)
1734 #define vaddq_m_u32(__inactive, __a, __b, __p) __arm_vaddq_m_u32(__inactive, __a, __b, __p)
1735 #define vaddq_m_u16(__inactive, __a, __b, __p) __arm_vaddq_m_u16(__inactive, __a, __b, __p)
1736 #define vandq_m_s8(__inactive, __a, __b, __p) __arm_vandq_m_s8(__inactive, __a, __b, __p)
1737 #define vandq_m_s32(__inactive, __a, __b, __p) __arm_vandq_m_s32(__inactive, __a, __b, __p)
1738 #define vandq_m_s16(__inactive, __a, __b, __p) __arm_vandq_m_s16(__inactive, __a, __b, __p)
1739 #define vandq_m_u8(__inactive, __a, __b, __p) __arm_vandq_m_u8(__inactive, __a, __b, __p)
1740 #define vandq_m_u32(__inactive, __a, __b, __p) __arm_vandq_m_u32(__inactive, __a, __b, __p)
1741 #define vandq_m_u16(__inactive, __a, __b, __p) __arm_vandq_m_u16(__inactive, __a, __b, __p)
1742 #define vbicq_m_s8(__inactive, __a, __b, __p) __arm_vbicq_m_s8(__inactive, __a, __b, __p)
1743 #define vbicq_m_s32(__inactive, __a, __b, __p) __arm_vbicq_m_s32(__inactive, __a, __b, __p)
1744 #define vbicq_m_s16(__inactive, __a, __b, __p) __arm_vbicq_m_s16(__inactive, __a, __b, __p)
1745 #define vbicq_m_u8(__inactive, __a, __b, __p) __arm_vbicq_m_u8(__inactive, __a, __b, __p)
1746 #define vbicq_m_u32(__inactive, __a, __b, __p) __arm_vbicq_m_u32(__inactive, __a, __b, __p)
1747 #define vbicq_m_u16(__inactive, __a, __b, __p) __arm_vbicq_m_u16(__inactive, __a, __b, __p)
1748 #define vbrsrq_m_n_s8(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s8(__inactive, __a, __b, __p)
1749 #define vbrsrq_m_n_s32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s32(__inactive, __a, __b, __p)
1750 #define vbrsrq_m_n_s16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s16(__inactive, __a, __b, __p)
1751 #define vbrsrq_m_n_u8(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u8(__inactive, __a, __b, __p)
1752 #define vbrsrq_m_n_u32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u32(__inactive, __a, __b, __p)
1753 #define vbrsrq_m_n_u16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u16(__inactive, __a, __b, __p)
1754 #define vcaddq_rot270_m_s8(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s8(__inactive, __a, __b, __p)
1755 #define vcaddq_rot270_m_s32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s32(__inactive, __a, __b, __p)
1756 #define vcaddq_rot270_m_s16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s16(__inactive, __a, __b, __p)
1757 #define vcaddq_rot270_m_u8(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u8(__inactive, __a, __b, __p)
1758 #define vcaddq_rot270_m_u32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u32(__inactive, __a, __b, __p)
1759 #define vcaddq_rot270_m_u16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u16(__inactive, __a, __b, __p)
1760 #define vcaddq_rot90_m_s8(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s8(__inactive, __a, __b, __p)
1761 #define vcaddq_rot90_m_s32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s32(__inactive, __a, __b, __p)
1762 #define vcaddq_rot90_m_s16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s16(__inactive, __a, __b, __p)
1763 #define vcaddq_rot90_m_u8(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u8(__inactive, __a, __b, __p)
1764 #define vcaddq_rot90_m_u32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u32(__inactive, __a, __b, __p)
1765 #define vcaddq_rot90_m_u16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u16(__inactive, __a, __b, __p)
1766 #define veorq_m_s8(__inactive, __a, __b, __p) __arm_veorq_m_s8(__inactive, __a, __b, __p)
1767 #define veorq_m_s32(__inactive, __a, __b, __p) __arm_veorq_m_s32(__inactive, __a, __b, __p)
1768 #define veorq_m_s16(__inactive, __a, __b, __p) __arm_veorq_m_s16(__inactive, __a, __b, __p)
1769 #define veorq_m_u8(__inactive, __a, __b, __p) __arm_veorq_m_u8(__inactive, __a, __b, __p)
1770 #define veorq_m_u32(__inactive, __a, __b, __p) __arm_veorq_m_u32(__inactive, __a, __b, __p)
1771 #define veorq_m_u16(__inactive, __a, __b, __p) __arm_veorq_m_u16(__inactive, __a, __b, __p)
1772 #define vhaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s8(__inactive, __a, __b, __p)
1773 #define vhaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s32(__inactive, __a, __b, __p)
1774 #define vhaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s16(__inactive, __a, __b, __p)
1775 #define vhaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u8(__inactive, __a, __b, __p)
1776 #define vhaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u32(__inactive, __a, __b, __p)
1777 #define vhaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u16(__inactive, __a, __b, __p)
1778 #define vhaddq_m_s8(__inactive, __a, __b, __p) __arm_vhaddq_m_s8(__inactive, __a, __b, __p)
1779 #define vhaddq_m_s32(__inactive, __a, __b, __p) __arm_vhaddq_m_s32(__inactive, __a, __b, __p)
1780 #define vhaddq_m_s16(__inactive, __a, __b, __p) __arm_vhaddq_m_s16(__inactive, __a, __b, __p)
1781 #define vhaddq_m_u8(__inactive, __a, __b, __p) __arm_vhaddq_m_u8(__inactive, __a, __b, __p)
1782 #define vhaddq_m_u32(__inactive, __a, __b, __p) __arm_vhaddq_m_u32(__inactive, __a, __b, __p)
1783 #define vhaddq_m_u16(__inactive, __a, __b, __p) __arm_vhaddq_m_u16(__inactive, __a, __b, __p)
1784 #define vhcaddq_rot270_m_s8(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s8(__inactive, __a, __b, __p)
1785 #define vhcaddq_rot270_m_s32(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s32(__inactive, __a, __b, __p)
1786 #define vhcaddq_rot270_m_s16(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s16(__inactive, __a, __b, __p)
1787 #define vhcaddq_rot90_m_s8(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s8(__inactive, __a, __b, __p)
1788 #define vhcaddq_rot90_m_s32(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s32(__inactive, __a, __b, __p)
1789 #define vhcaddq_rot90_m_s16(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s16(__inactive, __a, __b, __p)
1790 #define vhsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s8(__inactive, __a, __b, __p)
1791 #define vhsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s32(__inactive, __a, __b, __p)
1792 #define vhsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s16(__inactive, __a, __b, __p)
1793 #define vhsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u8(__inactive, __a, __b, __p)
1794 #define vhsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u32(__inactive, __a, __b, __p)
1795 #define vhsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u16(__inactive, __a, __b, __p)
1796 #define vhsubq_m_s8(__inactive, __a, __b, __p) __arm_vhsubq_m_s8(__inactive, __a, __b, __p)
1797 #define vhsubq_m_s32(__inactive, __a, __b, __p) __arm_vhsubq_m_s32(__inactive, __a, __b, __p)
1798 #define vhsubq_m_s16(__inactive, __a, __b, __p) __arm_vhsubq_m_s16(__inactive, __a, __b, __p)
1799 #define vhsubq_m_u8(__inactive, __a, __b, __p) __arm_vhsubq_m_u8(__inactive, __a, __b, __p)
1800 #define vhsubq_m_u32(__inactive, __a, __b, __p) __arm_vhsubq_m_u32(__inactive, __a, __b, __p)
1801 #define vhsubq_m_u16(__inactive, __a, __b, __p) __arm_vhsubq_m_u16(__inactive, __a, __b, __p)
1802 #define vmaxq_m_s8(__inactive, __a, __b, __p) __arm_vmaxq_m_s8(__inactive, __a, __b, __p)
1803 #define vmaxq_m_s32(__inactive, __a, __b, __p) __arm_vmaxq_m_s32(__inactive, __a, __b, __p)
1804 #define vmaxq_m_s16(__inactive, __a, __b, __p) __arm_vmaxq_m_s16(__inactive, __a, __b, __p)
1805 #define vmaxq_m_u8(__inactive, __a, __b, __p) __arm_vmaxq_m_u8(__inactive, __a, __b, __p)
1806 #define vmaxq_m_u32(__inactive, __a, __b, __p) __arm_vmaxq_m_u32(__inactive, __a, __b, __p)
1807 #define vmaxq_m_u16(__inactive, __a, __b, __p) __arm_vmaxq_m_u16(__inactive, __a, __b, __p)
1808 #define vminq_m_s8(__inactive, __a, __b, __p) __arm_vminq_m_s8(__inactive, __a, __b, __p)
1809 #define vminq_m_s32(__inactive, __a, __b, __p) __arm_vminq_m_s32(__inactive, __a, __b, __p)
1810 #define vminq_m_s16(__inactive, __a, __b, __p) __arm_vminq_m_s16(__inactive, __a, __b, __p)
1811 #define vminq_m_u8(__inactive, __a, __b, __p) __arm_vminq_m_u8(__inactive, __a, __b, __p)
1812 #define vminq_m_u32(__inactive, __a, __b, __p) __arm_vminq_m_u32(__inactive, __a, __b, __p)
1813 #define vminq_m_u16(__inactive, __a, __b, __p) __arm_vminq_m_u16(__inactive, __a, __b, __p)
1814 #define vmladavaq_p_s8(__a, __b, __c, __p) __arm_vmladavaq_p_s8(__a, __b, __c, __p)
1815 #define vmladavaq_p_s32(__a, __b, __c, __p) __arm_vmladavaq_p_s32(__a, __b, __c, __p)
1816 #define vmladavaq_p_s16(__a, __b, __c, __p) __arm_vmladavaq_p_s16(__a, __b, __c, __p)
1817 #define vmladavaq_p_u8(__a, __b, __c, __p) __arm_vmladavaq_p_u8(__a, __b, __c, __p)
1818 #define vmladavaq_p_u32(__a, __b, __c, __p) __arm_vmladavaq_p_u32(__a, __b, __c, __p)
1819 #define vmladavaq_p_u16(__a, __b, __c, __p) __arm_vmladavaq_p_u16(__a, __b, __c, __p)
1820 #define vmladavaxq_p_s8(__a, __b, __c, __p) __arm_vmladavaxq_p_s8(__a, __b, __c, __p)
1821 #define vmladavaxq_p_s32(__a, __b, __c, __p) __arm_vmladavaxq_p_s32(__a, __b, __c, __p)
1822 #define vmladavaxq_p_s16(__a, __b, __c, __p) __arm_vmladavaxq_p_s16(__a, __b, __c, __p)
1823 #define vmlaq_m_n_s8(__a, __b, __c, __p) __arm_vmlaq_m_n_s8(__a, __b, __c, __p)
1824 #define vmlaq_m_n_s32(__a, __b, __c, __p) __arm_vmlaq_m_n_s32(__a, __b, __c, __p)
1825 #define vmlaq_m_n_s16(__a, __b, __c, __p) __arm_vmlaq_m_n_s16(__a, __b, __c, __p)
1826 #define vmlaq_m_n_u8(__a, __b, __c, __p) __arm_vmlaq_m_n_u8(__a, __b, __c, __p)
1827 #define vmlaq_m_n_u32(__a, __b, __c, __p) __arm_vmlaq_m_n_u32(__a, __b, __c, __p)
1828 #define vmlaq_m_n_u16(__a, __b, __c, __p) __arm_vmlaq_m_n_u16(__a, __b, __c, __p)
1829 #define vmlasq_m_n_s8(__a, __b, __c, __p) __arm_vmlasq_m_n_s8(__a, __b, __c, __p)
1830 #define vmlasq_m_n_s32(__a, __b, __c, __p) __arm_vmlasq_m_n_s32(__a, __b, __c, __p)
1831 #define vmlasq_m_n_s16(__a, __b, __c, __p) __arm_vmlasq_m_n_s16(__a, __b, __c, __p)
1832 #define vmlasq_m_n_u8(__a, __b, __c, __p) __arm_vmlasq_m_n_u8(__a, __b, __c, __p)
1833 #define vmlasq_m_n_u32(__a, __b, __c, __p) __arm_vmlasq_m_n_u32(__a, __b, __c, __p)
1834 #define vmlasq_m_n_u16(__a, __b, __c, __p) __arm_vmlasq_m_n_u16(__a, __b, __c, __p)
1835 #define vmlsdavaq_p_s8(__a, __b, __c, __p) __arm_vmlsdavaq_p_s8(__a, __b, __c, __p)
1836 #define vmlsdavaq_p_s32(__a, __b, __c, __p) __arm_vmlsdavaq_p_s32(__a, __b, __c, __p)
1837 #define vmlsdavaq_p_s16(__a, __b, __c, __p) __arm_vmlsdavaq_p_s16(__a, __b, __c, __p)
1838 #define vmlsdavaxq_p_s8(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s8(__a, __b, __c, __p)
1839 #define vmlsdavaxq_p_s32(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s32(__a, __b, __c, __p)
1840 #define vmlsdavaxq_p_s16(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s16(__a, __b, __c, __p)
1841 #define vmulhq_m_s8(__inactive, __a, __b, __p) __arm_vmulhq_m_s8(__inactive, __a, __b, __p)
1842 #define vmulhq_m_s32(__inactive, __a, __b, __p) __arm_vmulhq_m_s32(__inactive, __a, __b, __p)
1843 #define vmulhq_m_s16(__inactive, __a, __b, __p) __arm_vmulhq_m_s16(__inactive, __a, __b, __p)
1844 #define vmulhq_m_u8(__inactive, __a, __b, __p) __arm_vmulhq_m_u8(__inactive, __a, __b, __p)
1845 #define vmulhq_m_u32(__inactive, __a, __b, __p) __arm_vmulhq_m_u32(__inactive, __a, __b, __p)
1846 #define vmulhq_m_u16(__inactive, __a, __b, __p) __arm_vmulhq_m_u16(__inactive, __a, __b, __p)
1847 #define vmullbq_int_m_s8(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s8(__inactive, __a, __b, __p)
1848 #define vmullbq_int_m_s32(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s32(__inactive, __a, __b, __p)
1849 #define vmullbq_int_m_s16(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s16(__inactive, __a, __b, __p)
1850 #define vmullbq_int_m_u8(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u8(__inactive, __a, __b, __p)
1851 #define vmullbq_int_m_u32(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u32(__inactive, __a, __b, __p)
1852 #define vmullbq_int_m_u16(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u16(__inactive, __a, __b, __p)
1853 #define vmulltq_int_m_s8(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s8(__inactive, __a, __b, __p)
1854 #define vmulltq_int_m_s32(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s32(__inactive, __a, __b, __p)
1855 #define vmulltq_int_m_s16(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s16(__inactive, __a, __b, __p)
1856 #define vmulltq_int_m_u8(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u8(__inactive, __a, __b, __p)
1857 #define vmulltq_int_m_u32(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u32(__inactive, __a, __b, __p)
1858 #define vmulltq_int_m_u16(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u16(__inactive, __a, __b, __p)
1859 #define vmulq_m_n_s8(__inactive, __a, __b, __p) __arm_vmulq_m_n_s8(__inactive, __a, __b, __p)
1860 #define vmulq_m_n_s32(__inactive, __a, __b, __p) __arm_vmulq_m_n_s32(__inactive, __a, __b, __p)
1861 #define vmulq_m_n_s16(__inactive, __a, __b, __p) __arm_vmulq_m_n_s16(__inactive, __a, __b, __p)
1862 #define vmulq_m_n_u8(__inactive, __a, __b, __p) __arm_vmulq_m_n_u8(__inactive, __a, __b, __p)
1863 #define vmulq_m_n_u32(__inactive, __a, __b, __p) __arm_vmulq_m_n_u32(__inactive, __a, __b, __p)
1864 #define vmulq_m_n_u16(__inactive, __a, __b, __p) __arm_vmulq_m_n_u16(__inactive, __a, __b, __p)
1865 #define vmulq_m_s8(__inactive, __a, __b, __p) __arm_vmulq_m_s8(__inactive, __a, __b, __p)
1866 #define vmulq_m_s32(__inactive, __a, __b, __p) __arm_vmulq_m_s32(__inactive, __a, __b, __p)
1867 #define vmulq_m_s16(__inactive, __a, __b, __p) __arm_vmulq_m_s16(__inactive, __a, __b, __p)
1868 #define vmulq_m_u8(__inactive, __a, __b, __p) __arm_vmulq_m_u8(__inactive, __a, __b, __p)
1869 #define vmulq_m_u32(__inactive, __a, __b, __p) __arm_vmulq_m_u32(__inactive, __a, __b, __p)
1870 #define vmulq_m_u16(__inactive, __a, __b, __p) __arm_vmulq_m_u16(__inactive, __a, __b, __p)
1871 #define vornq_m_s8(__inactive, __a, __b, __p) __arm_vornq_m_s8(__inactive, __a, __b, __p)
1872 #define vornq_m_s32(__inactive, __a, __b, __p) __arm_vornq_m_s32(__inactive, __a, __b, __p)
1873 #define vornq_m_s16(__inactive, __a, __b, __p) __arm_vornq_m_s16(__inactive, __a, __b, __p)
1874 #define vornq_m_u8(__inactive, __a, __b, __p) __arm_vornq_m_u8(__inactive, __a, __b, __p)
1875 #define vornq_m_u32(__inactive, __a, __b, __p) __arm_vornq_m_u32(__inactive, __a, __b, __p)
1876 #define vornq_m_u16(__inactive, __a, __b, __p) __arm_vornq_m_u16(__inactive, __a, __b, __p)
1877 #define vorrq_m_s8(__inactive, __a, __b, __p) __arm_vorrq_m_s8(__inactive, __a, __b, __p)
1878 #define vorrq_m_s32(__inactive, __a, __b, __p) __arm_vorrq_m_s32(__inactive, __a, __b, __p)
1879 #define vorrq_m_s16(__inactive, __a, __b, __p) __arm_vorrq_m_s16(__inactive, __a, __b, __p)
1880 #define vorrq_m_u8(__inactive, __a, __b, __p) __arm_vorrq_m_u8(__inactive, __a, __b, __p)
1881 #define vorrq_m_u32(__inactive, __a, __b, __p) __arm_vorrq_m_u32(__inactive, __a, __b, __p)
1882 #define vorrq_m_u16(__inactive, __a, __b, __p) __arm_vorrq_m_u16(__inactive, __a, __b, __p)
1883 #define vqaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s8(__inactive, __a, __b, __p)
1884 #define vqaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s32(__inactive, __a, __b, __p)
1885 #define vqaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s16(__inactive, __a, __b, __p)
1886 #define vqaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u8(__inactive, __a, __b, __p)
1887 #define vqaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u32(__inactive, __a, __b, __p)
1888 #define vqaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u16(__inactive, __a, __b, __p)
1889 #define vqaddq_m_s8(__inactive, __a, __b, __p) __arm_vqaddq_m_s8(__inactive, __a, __b, __p)
1890 #define vqaddq_m_s32(__inactive, __a, __b, __p) __arm_vqaddq_m_s32(__inactive, __a, __b, __p)
1891 #define vqaddq_m_s16(__inactive, __a, __b, __p) __arm_vqaddq_m_s16(__inactive, __a, __b, __p)
1892 #define vqaddq_m_u8(__inactive, __a, __b, __p) __arm_vqaddq_m_u8(__inactive, __a, __b, __p)
1893 #define vqaddq_m_u32(__inactive, __a, __b, __p) __arm_vqaddq_m_u32(__inactive, __a, __b, __p)
1894 #define vqaddq_m_u16(__inactive, __a, __b, __p) __arm_vqaddq_m_u16(__inactive, __a, __b, __p)
1895 #define vqdmladhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s8(__inactive, __a, __b, __p)
1896 #define vqdmladhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s32(__inactive, __a, __b, __p)
1897 #define vqdmladhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s16(__inactive, __a, __b, __p)
1898 #define vqdmladhxq_m_s8(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s8(__inactive, __a, __b, __p)
1899 #define vqdmladhxq_m_s32(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s32(__inactive, __a, __b, __p)
1900 #define vqdmladhxq_m_s16(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s16(__inactive, __a, __b, __p)
1901 #define vqdmlashq_m_n_s8(__a, __b, __c, __p) __arm_vqdmlashq_m_n_s8(__a, __b, __c, __p)
1902 #define vqdmlashq_m_n_s32(__a, __b, __c, __p) __arm_vqdmlashq_m_n_s32(__a, __b, __c, __p)
1903 #define vqdmlashq_m_n_s16(__a, __b, __c, __p) __arm_vqdmlashq_m_n_s16(__a, __b, __c, __p)
1904 #define vqdmlahq_m_n_s8(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s8(__a, __b, __c, __p)
1905 #define vqdmlahq_m_n_s32(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s32(__a, __b, __c, __p)
1906 #define vqdmlahq_m_n_s16(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s16(__a, __b, __c, __p)
1907 #define vqdmlsdhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s8(__inactive, __a, __b, __p)
1908 #define vqdmlsdhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s32(__inactive, __a, __b, __p)
1909 #define vqdmlsdhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s16(__inactive, __a, __b, __p)
1910 #define vqdmlsdhxq_m_s8(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s8(__inactive, __a, __b, __p)
1911 #define vqdmlsdhxq_m_s32(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s32(__inactive, __a, __b, __p)
1912 #define vqdmlsdhxq_m_s16(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s16(__inactive, __a, __b, __p)
1913 #define vqdmulhq_m_n_s8(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s8(__inactive, __a, __b, __p)
1914 #define vqdmulhq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s32(__inactive, __a, __b, __p)
1915 #define vqdmulhq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s16(__inactive, __a, __b, __p)
1916 #define vqdmulhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s8(__inactive, __a, __b, __p)
1917 #define vqdmulhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s32(__inactive, __a, __b, __p)
1918 #define vqdmulhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s16(__inactive, __a, __b, __p)
1919 #define vqrdmladhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s8(__inactive, __a, __b, __p)
1920 #define vqrdmladhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s32(__inactive, __a, __b, __p)
1921 #define vqrdmladhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s16(__inactive, __a, __b, __p)
1922 #define vqrdmladhxq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s8(__inactive, __a, __b, __p)
1923 #define vqrdmladhxq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s32(__inactive, __a, __b, __p)
1924 #define vqrdmladhxq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s16(__inactive, __a, __b, __p)
1925 #define vqrdmlahq_m_n_s8(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s8(__a, __b, __c, __p)
1926 #define vqrdmlahq_m_n_s32(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s32(__a, __b, __c, __p)
1927 #define vqrdmlahq_m_n_s16(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s16(__a, __b, __c, __p)
1928 #define vqrdmlashq_m_n_s8(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s8(__a, __b, __c, __p)
1929 #define vqrdmlashq_m_n_s32(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s32(__a, __b, __c, __p)
1930 #define vqrdmlashq_m_n_s16(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s16(__a, __b, __c, __p)
1931 #define vqrdmlsdhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s8(__inactive, __a, __b, __p)
1932 #define vqrdmlsdhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s32(__inactive, __a, __b, __p)
1933 #define vqrdmlsdhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s16(__inactive, __a, __b, __p)
1934 #define vqrdmlsdhxq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s8(__inactive, __a, __b, __p)
1935 #define vqrdmlsdhxq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s32(__inactive, __a, __b, __p)
1936 #define vqrdmlsdhxq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s16(__inactive, __a, __b, __p)
1937 #define vqrdmulhq_m_n_s8(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s8(__inactive, __a, __b, __p)
1938 #define vqrdmulhq_m_n_s32(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s32(__inactive, __a, __b, __p)
1939 #define vqrdmulhq_m_n_s16(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s16(__inactive, __a, __b, __p)
1940 #define vqrdmulhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s8(__inactive, __a, __b, __p)
1941 #define vqrdmulhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s32(__inactive, __a, __b, __p)
1942 #define vqrdmulhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s16(__inactive, __a, __b, __p)
1943 #define vqrshlq_m_s8(__inactive, __a, __b, __p) __arm_vqrshlq_m_s8(__inactive, __a, __b, __p)
1944 #define vqrshlq_m_s32(__inactive, __a, __b, __p) __arm_vqrshlq_m_s32(__inactive, __a, __b, __p)
1945 #define vqrshlq_m_s16(__inactive, __a, __b, __p) __arm_vqrshlq_m_s16(__inactive, __a, __b, __p)
1946 #define vqrshlq_m_u8(__inactive, __a, __b, __p) __arm_vqrshlq_m_u8(__inactive, __a, __b, __p)
1947 #define vqrshlq_m_u32(__inactive, __a, __b, __p) __arm_vqrshlq_m_u32(__inactive, __a, __b, __p)
1948 #define vqrshlq_m_u16(__inactive, __a, __b, __p) __arm_vqrshlq_m_u16(__inactive, __a, __b, __p)
1949 #define vqshlq_m_n_s8(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s8(__inactive, __a, __imm, __p)
1950 #define vqshlq_m_n_s32(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s32(__inactive, __a, __imm, __p)
1951 #define vqshlq_m_n_s16(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s16(__inactive, __a, __imm, __p)
1952 #define vqshlq_m_n_u8(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u8(__inactive, __a, __imm, __p)
1953 #define vqshlq_m_n_u32(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u32(__inactive, __a, __imm, __p)
1954 #define vqshlq_m_n_u16(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u16(__inactive, __a, __imm, __p)
1955 #define vqshlq_m_s8(__inactive, __a, __b, __p) __arm_vqshlq_m_s8(__inactive, __a, __b, __p)
1956 #define vqshlq_m_s32(__inactive, __a, __b, __p) __arm_vqshlq_m_s32(__inactive, __a, __b, __p)
1957 #define vqshlq_m_s16(__inactive, __a, __b, __p) __arm_vqshlq_m_s16(__inactive, __a, __b, __p)
1958 #define vqshlq_m_u8(__inactive, __a, __b, __p) __arm_vqshlq_m_u8(__inactive, __a, __b, __p)
1959 #define vqshlq_m_u32(__inactive, __a, __b, __p) __arm_vqshlq_m_u32(__inactive, __a, __b, __p)
1960 #define vqshlq_m_u16(__inactive, __a, __b, __p) __arm_vqshlq_m_u16(__inactive, __a, __b, __p)
1961 #define vqsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s8(__inactive, __a, __b, __p)
1962 #define vqsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s32(__inactive, __a, __b, __p)
1963 #define vqsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s16(__inactive, __a, __b, __p)
1964 #define vqsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u8(__inactive, __a, __b, __p)
1965 #define vqsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u32(__inactive, __a, __b, __p)
1966 #define vqsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u16(__inactive, __a, __b, __p)
1967 #define vqsubq_m_s8(__inactive, __a, __b, __p) __arm_vqsubq_m_s8(__inactive, __a, __b, __p)
1968 #define vqsubq_m_s32(__inactive, __a, __b, __p) __arm_vqsubq_m_s32(__inactive, __a, __b, __p)
1969 #define vqsubq_m_s16(__inactive, __a, __b, __p) __arm_vqsubq_m_s16(__inactive, __a, __b, __p)
1970 #define vqsubq_m_u8(__inactive, __a, __b, __p) __arm_vqsubq_m_u8(__inactive, __a, __b, __p)
1971 #define vqsubq_m_u32(__inactive, __a, __b, __p) __arm_vqsubq_m_u32(__inactive, __a, __b, __p)
1972 #define vqsubq_m_u16(__inactive, __a, __b, __p) __arm_vqsubq_m_u16(__inactive, __a, __b, __p)
1973 #define vrhaddq_m_s8(__inactive, __a, __b, __p) __arm_vrhaddq_m_s8(__inactive, __a, __b, __p)
1974 #define vrhaddq_m_s32(__inactive, __a, __b, __p) __arm_vrhaddq_m_s32(__inactive, __a, __b, __p)
1975 #define vrhaddq_m_s16(__inactive, __a, __b, __p) __arm_vrhaddq_m_s16(__inactive, __a, __b, __p)
1976 #define vrhaddq_m_u8(__inactive, __a, __b, __p) __arm_vrhaddq_m_u8(__inactive, __a, __b, __p)
1977 #define vrhaddq_m_u32(__inactive, __a, __b, __p) __arm_vrhaddq_m_u32(__inactive, __a, __b, __p)
1978 #define vrhaddq_m_u16(__inactive, __a, __b, __p) __arm_vrhaddq_m_u16(__inactive, __a, __b, __p)
1979 #define vrmulhq_m_s8(__inactive, __a, __b, __p) __arm_vrmulhq_m_s8(__inactive, __a, __b, __p)
1980 #define vrmulhq_m_s32(__inactive, __a, __b, __p) __arm_vrmulhq_m_s32(__inactive, __a, __b, __p)
1981 #define vrmulhq_m_s16(__inactive, __a, __b, __p) __arm_vrmulhq_m_s16(__inactive, __a, __b, __p)
1982 #define vrmulhq_m_u8(__inactive, __a, __b, __p) __arm_vrmulhq_m_u8(__inactive, __a, __b, __p)
1983 #define vrmulhq_m_u32(__inactive, __a, __b, __p) __arm_vrmulhq_m_u32(__inactive, __a, __b, __p)
1984 #define vrmulhq_m_u16(__inactive, __a, __b, __p) __arm_vrmulhq_m_u16(__inactive, __a, __b, __p)
1985 #define vrshlq_m_s8(__inactive, __a, __b, __p) __arm_vrshlq_m_s8(__inactive, __a, __b, __p)
1986 #define vrshlq_m_s32(__inactive, __a, __b, __p) __arm_vrshlq_m_s32(__inactive, __a, __b, __p)
1987 #define vrshlq_m_s16(__inactive, __a, __b, __p) __arm_vrshlq_m_s16(__inactive, __a, __b, __p)
1988 #define vrshlq_m_u8(__inactive, __a, __b, __p) __arm_vrshlq_m_u8(__inactive, __a, __b, __p)
1989 #define vrshlq_m_u32(__inactive, __a, __b, __p) __arm_vrshlq_m_u32(__inactive, __a, __b, __p)
1990 #define vrshlq_m_u16(__inactive, __a, __b, __p) __arm_vrshlq_m_u16(__inactive, __a, __b, __p)
1991 #define vrshrq_m_n_s8(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s8(__inactive, __a, __imm, __p)
1992 #define vrshrq_m_n_s32(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s32(__inactive, __a, __imm, __p)
1993 #define vrshrq_m_n_s16(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s16(__inactive, __a, __imm, __p)
1994 #define vrshrq_m_n_u8(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u8(__inactive, __a, __imm, __p)
1995 #define vrshrq_m_n_u32(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u32(__inactive, __a, __imm, __p)
1996 #define vrshrq_m_n_u16(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u16(__inactive, __a, __imm, __p)
1997 #define vshlq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s8(__inactive, __a, __imm, __p)
1998 #define vshlq_m_n_s32(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s32(__inactive, __a, __imm, __p)
1999 #define vshlq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s16(__inactive, __a, __imm, __p)
2000 #define vshlq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u8(__inactive, __a, __imm, __p)
2001 #define vshlq_m_n_u32(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u32(__inactive, __a, __imm, __p)
2002 #define vshlq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u16(__inactive, __a, __imm, __p)
2003 #define vshrq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s8(__inactive, __a, __imm, __p)
2004 #define vshrq_m_n_s32(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s32(__inactive, __a, __imm, __p)
2005 #define vshrq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s16(__inactive, __a, __imm, __p)
2006 #define vshrq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u8(__inactive, __a, __imm, __p)
2007 #define vshrq_m_n_u32(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u32(__inactive, __a, __imm, __p)
2008 #define vshrq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u16(__inactive, __a, __imm, __p)
2009 #define vsliq_m_n_s8(__a, __b, __imm, __p) __arm_vsliq_m_n_s8(__a, __b, __imm, __p)
2010 #define vsliq_m_n_s32(__a, __b, __imm, __p) __arm_vsliq_m_n_s32(__a, __b, __imm, __p)
2011 #define vsliq_m_n_s16(__a, __b, __imm, __p) __arm_vsliq_m_n_s16(__a, __b, __imm, __p)
2012 #define vsliq_m_n_u8(__a, __b, __imm, __p) __arm_vsliq_m_n_u8(__a, __b, __imm, __p)
2013 #define vsliq_m_n_u32(__a, __b, __imm, __p) __arm_vsliq_m_n_u32(__a, __b, __imm, __p)
2014 #define vsliq_m_n_u16(__a, __b, __imm, __p) __arm_vsliq_m_n_u16(__a, __b, __imm, __p)
2015 #define vsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vsubq_m_n_s8(__inactive, __a, __b, __p)
2016 #define vsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vsubq_m_n_s32(__inactive, __a, __b, __p)
2017 #define vsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vsubq_m_n_s16(__inactive, __a, __b, __p)
2018 #define vsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vsubq_m_n_u8(__inactive, __a, __b, __p)
2019 #define vsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vsubq_m_n_u32(__inactive, __a, __b, __p)
2020 #define vsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vsubq_m_n_u16(__inactive, __a, __b, __p)
2021 #define vmlaldavaq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaq_p_s32(__a, __b, __c, __p)
2022 #define vmlaldavaq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaq_p_s16(__a, __b, __c, __p)
2023 #define vmlaldavaq_p_u32(__a, __b, __c, __p) __arm_vmlaldavaq_p_u32(__a, __b, __c, __p)
2024 #define vmlaldavaq_p_u16(__a, __b, __c, __p) __arm_vmlaldavaq_p_u16(__a, __b, __c, __p)
2025 #define vmlaldavaxq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaxq_p_s32(__a, __b, __c, __p)
2026 #define vmlaldavaxq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaxq_p_s16(__a, __b, __c, __p)
2027 #define vmlsldavaq_p_s32(__a, __b, __c, __p) __arm_vmlsldavaq_p_s32(__a, __b, __c, __p)
2028 #define vmlsldavaq_p_s16(__a, __b, __c, __p) __arm_vmlsldavaq_p_s16(__a, __b, __c, __p)
2029 #define vmlsldavaxq_p_s32(__a, __b, __c, __p) __arm_vmlsldavaxq_p_s32(__a, __b, __c, __p)
2030 #define vmlsldavaxq_p_s16(__a, __b, __c, __p) __arm_vmlsldavaxq_p_s16(__a, __b, __c, __p)
2031 #define vmullbq_poly_m_p8(__inactive, __a, __b, __p) __arm_vmullbq_poly_m_p8(__inactive, __a, __b, __p)
2032 #define vmullbq_poly_m_p16(__inactive, __a, __b, __p) __arm_vmullbq_poly_m_p16(__inactive, __a, __b, __p)
2033 #define vmulltq_poly_m_p8(__inactive, __a, __b, __p) __arm_vmulltq_poly_m_p8(__inactive, __a, __b, __p)
2034 #define vmulltq_poly_m_p16(__inactive, __a, __b, __p) __arm_vmulltq_poly_m_p16(__inactive, __a, __b, __p)
2035 #define vqdmullbq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmullbq_m_n_s32(__inactive, __a, __b, __p)
2036 #define vqdmullbq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmullbq_m_n_s16(__inactive, __a, __b, __p)
2037 #define vqdmullbq_m_s32(__inactive, __a, __b, __p) __arm_vqdmullbq_m_s32(__inactive, __a, __b, __p)
2038 #define vqdmullbq_m_s16(__inactive, __a, __b, __p) __arm_vqdmullbq_m_s16(__inactive, __a, __b, __p)
2039 #define vqdmulltq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmulltq_m_n_s32(__inactive, __a, __b, __p)
2040 #define vqdmulltq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmulltq_m_n_s16(__inactive, __a, __b, __p)
2041 #define vqdmulltq_m_s32(__inactive, __a, __b, __p) __arm_vqdmulltq_m_s32(__inactive, __a, __b, __p)
2042 #define vqdmulltq_m_s16(__inactive, __a, __b, __p) __arm_vqdmulltq_m_s16(__inactive, __a, __b, __p)
2043 #define vqrshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_s32(__a, __b, __imm, __p)
2044 #define vqrshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_s16(__a, __b, __imm, __p)
2045 #define vqrshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_u32(__a, __b, __imm, __p)
2046 #define vqrshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_u16(__a, __b, __imm, __p)
2047 #define vqrshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_s32(__a, __b, __imm, __p)
2048 #define vqrshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_s16(__a, __b, __imm, __p)
2049 #define vqrshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_u32(__a, __b, __imm, __p)
2050 #define vqrshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_u16(__a, __b, __imm, __p)
2051 #define vqrshrunbq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrunbq_m_n_s32(__a, __b, __imm, __p)
2052 #define vqrshrunbq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrunbq_m_n_s16(__a, __b, __imm, __p)
2053 #define vqrshruntq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshruntq_m_n_s32(__a, __b, __imm, __p)
2054 #define vqrshruntq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshruntq_m_n_s16(__a, __b, __imm, __p)
2055 #define vqshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_s32(__a, __b, __imm, __p)
2056 #define vqshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_s16(__a, __b, __imm, __p)
2057 #define vqshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_u32(__a, __b, __imm, __p)
2058 #define vqshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_u16(__a, __b, __imm, __p)
2059 #define vqshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrntq_m_n_s32(__a, __b, __imm, __p)
2060 #define vqshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrntq_m_n_s16(__a, __b, __imm, __p)
2061 #define vqshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vqshrntq_m_n_u32(__a, __b, __imm, __p)
2062 #define vqshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vqshrntq_m_n_u16(__a, __b, __imm, __p)
2063 #define vqshrunbq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrunbq_m_n_s32(__a, __b, __imm, __p)
2064 #define vqshrunbq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrunbq_m_n_s16(__a, __b, __imm, __p)
2065 #define vqshruntq_m_n_s32(__a, __b, __imm, __p) __arm_vqshruntq_m_n_s32(__a, __b, __imm, __p)
2066 #define vqshruntq_m_n_s16(__a, __b, __imm, __p) __arm_vqshruntq_m_n_s16(__a, __b, __imm, __p)
2067 #define vrmlaldavhaq_p_s32(__a, __b, __c, __p) __arm_vrmlaldavhaq_p_s32(__a, __b, __c, __p)
2068 #define vrmlaldavhaq_p_u32(__a, __b, __c, __p) __arm_vrmlaldavhaq_p_u32(__a, __b, __c, __p)
2069 #define vrmlaldavhaxq_p_s32(__a, __b, __c, __p) __arm_vrmlaldavhaxq_p_s32(__a, __b, __c, __p)
2070 #define vrmlsldavhaq_p_s32(__a, __b, __c, __p) __arm_vrmlsldavhaq_p_s32(__a, __b, __c, __p)
2071 #define vrmlsldavhaxq_p_s32(__a, __b, __c, __p) __arm_vrmlsldavhaxq_p_s32(__a, __b, __c, __p)
2072 #define vrshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_s32(__a, __b, __imm, __p)
2073 #define vrshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_s16(__a, __b, __imm, __p)
2074 #define vrshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_u32(__a, __b, __imm, __p)
2075 #define vrshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_u16(__a, __b, __imm, __p)
2076 #define vrshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vrshrntq_m_n_s32(__a, __b, __imm, __p)
2077 #define vrshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vrshrntq_m_n_s16(__a, __b, __imm, __p)
2078 #define vrshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vrshrntq_m_n_u32(__a, __b, __imm, __p)
2079 #define vrshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vrshrntq_m_n_u16(__a, __b, __imm, __p)
2080 #define vshllbq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_s8(__inactive, __a, __imm, __p)
2081 #define vshllbq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_s16(__inactive, __a, __imm, __p)
2082 #define vshllbq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_u8(__inactive, __a, __imm, __p)
2083 #define vshllbq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_u16(__inactive, __a, __imm, __p)
2084 #define vshlltq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_s8(__inactive, __a, __imm, __p)
2085 #define vshlltq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_s16(__inactive, __a, __imm, __p)
2086 #define vshlltq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_u8(__inactive, __a, __imm, __p)
2087 #define vshlltq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_u16(__inactive, __a, __imm, __p)
2088 #define vshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vshrnbq_m_n_s32(__a, __b, __imm, __p)
2089 #define vshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vshrnbq_m_n_s16(__a, __b, __imm, __p)
2090 #define vshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vshrnbq_m_n_u32(__a, __b, __imm, __p)
2091 #define vshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vshrnbq_m_n_u16(__a, __b, __imm, __p)
2092 #define vshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vshrntq_m_n_s32(__a, __b, __imm, __p)
2093 #define vshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vshrntq_m_n_s16(__a, __b, __imm, __p)
2094 #define vshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vshrntq_m_n_u32(__a, __b, __imm, __p)
2095 #define vshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vshrntq_m_n_u16(__a, __b, __imm, __p)
/* Predicated (merged) floating-point aliases (f16/f32 variants).
   __inactive supplies the result lanes where the predicate __p is false;
   vcmlaq/vfmaq/vfmsq accumulate into their first argument instead.  */
2096 #define vabdq_m_f32(__inactive, __a, __b, __p) __arm_vabdq_m_f32(__inactive, __a, __b, __p)
2097 #define vabdq_m_f16(__inactive, __a, __b, __p) __arm_vabdq_m_f16(__inactive, __a, __b, __p)
2098 #define vaddq_m_f32(__inactive, __a, __b, __p) __arm_vaddq_m_f32(__inactive, __a, __b, __p)
2099 #define vaddq_m_f16(__inactive, __a, __b, __p) __arm_vaddq_m_f16(__inactive, __a, __b, __p)
2100 #define vaddq_m_n_f32(__inactive, __a, __b, __p) __arm_vaddq_m_n_f32(__inactive, __a, __b, __p)
2101 #define vaddq_m_n_f16(__inactive, __a, __b, __p) __arm_vaddq_m_n_f16(__inactive, __a, __b, __p)
2102 #define vandq_m_f32(__inactive, __a, __b, __p) __arm_vandq_m_f32(__inactive, __a, __b, __p)
2103 #define vandq_m_f16(__inactive, __a, __b, __p) __arm_vandq_m_f16(__inactive, __a, __b, __p)
2104 #define vbicq_m_f32(__inactive, __a, __b, __p) __arm_vbicq_m_f32(__inactive, __a, __b, __p)
2105 #define vbicq_m_f16(__inactive, __a, __b, __p) __arm_vbicq_m_f16(__inactive, __a, __b, __p)
2106 #define vbrsrq_m_n_f32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_f32(__inactive, __a, __b, __p)
2107 #define vbrsrq_m_n_f16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_f16(__inactive, __a, __b, __p)
2108 #define vcaddq_rot270_m_f32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_f32(__inactive, __a, __b, __p)
2109 #define vcaddq_rot270_m_f16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_f16(__inactive, __a, __b, __p)
2110 #define vcaddq_rot90_m_f32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_f32(__inactive, __a, __b, __p)
2111 #define vcaddq_rot90_m_f16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_f16(__inactive, __a, __b, __p)
2112 #define vcmlaq_m_f32(__a, __b, __c, __p) __arm_vcmlaq_m_f32(__a, __b, __c, __p)
2113 #define vcmlaq_m_f16(__a, __b, __c, __p) __arm_vcmlaq_m_f16(__a, __b, __c, __p)
2114 #define vcmlaq_rot180_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot180_m_f32(__a, __b, __c, __p)
2115 #define vcmlaq_rot180_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot180_m_f16(__a, __b, __c, __p)
2116 #define vcmlaq_rot270_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot270_m_f32(__a, __b, __c, __p)
2117 #define vcmlaq_rot270_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot270_m_f16(__a, __b, __c, __p)
2118 #define vcmlaq_rot90_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot90_m_f32(__a, __b, __c, __p)
2119 #define vcmlaq_rot90_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot90_m_f16(__a, __b, __c, __p)
2120 #define vcmulq_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_m_f32(__inactive, __a, __b, __p)
2121 #define vcmulq_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_m_f16(__inactive, __a, __b, __p)
2122 #define vcmulq_rot180_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m_f32(__inactive, __a, __b, __p)
2123 #define vcmulq_rot180_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m_f16(__inactive, __a, __b, __p)
2124 #define vcmulq_rot270_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m_f32(__inactive, __a, __b, __p)
2125 #define vcmulq_rot270_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m_f16(__inactive, __a, __b, __p)
2126 #define vcmulq_rot90_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m_f32(__inactive, __a, __b, __p)
2127 #define vcmulq_rot90_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m_f16(__inactive, __a, __b, __p)
2128 #define vcvtq_m_n_s32_f32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_s32_f32(__inactive, __a, __imm6, __p)
2129 #define vcvtq_m_n_s16_f16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_s16_f16(__inactive, __a, __imm6, __p)
2130 #define vcvtq_m_n_u32_f32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_u32_f32(__inactive, __a, __imm6, __p)
2131 #define vcvtq_m_n_u16_f16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_u16_f16(__inactive, __a, __imm6, __p)
2132 #define veorq_m_f32(__inactive, __a, __b, __p) __arm_veorq_m_f32(__inactive, __a, __b, __p)
2133 #define veorq_m_f16(__inactive, __a, __b, __p) __arm_veorq_m_f16(__inactive, __a, __b, __p)
2134 #define vfmaq_m_f32(__a, __b, __c, __p) __arm_vfmaq_m_f32(__a, __b, __c, __p)
2135 #define vfmaq_m_f16(__a, __b, __c, __p) __arm_vfmaq_m_f16(__a, __b, __c, __p)
2136 #define vfmaq_m_n_f32(__a, __b, __c, __p) __arm_vfmaq_m_n_f32(__a, __b, __c, __p)
2137 #define vfmaq_m_n_f16(__a, __b, __c, __p) __arm_vfmaq_m_n_f16(__a, __b, __c, __p)
2138 #define vfmasq_m_n_f32(__a, __b, __c, __p) __arm_vfmasq_m_n_f32(__a, __b, __c, __p)
2139 #define vfmasq_m_n_f16(__a, __b, __c, __p) __arm_vfmasq_m_n_f16(__a, __b, __c, __p)
2140 #define vfmsq_m_f32(__a, __b, __c, __p) __arm_vfmsq_m_f32(__a, __b, __c, __p)
2141 #define vfmsq_m_f16(__a, __b, __c, __p) __arm_vfmsq_m_f16(__a, __b, __c, __p)
2142 #define vmaxnmq_m_f32(__inactive, __a, __b, __p) __arm_vmaxnmq_m_f32(__inactive, __a, __b, __p)
2143 #define vmaxnmq_m_f16(__inactive, __a, __b, __p) __arm_vmaxnmq_m_f16(__inactive, __a, __b, __p)
2144 #define vminnmq_m_f32(__inactive, __a, __b, __p) __arm_vminnmq_m_f32(__inactive, __a, __b, __p)
2145 #define vminnmq_m_f16(__inactive, __a, __b, __p) __arm_vminnmq_m_f16(__inactive, __a, __b, __p)
2146 #define vmulq_m_f32(__inactive, __a, __b, __p) __arm_vmulq_m_f32(__inactive, __a, __b, __p)
2147 #define vmulq_m_f16(__inactive, __a, __b, __p) __arm_vmulq_m_f16(__inactive, __a, __b, __p)
2148 #define vmulq_m_n_f32(__inactive, __a, __b, __p) __arm_vmulq_m_n_f32(__inactive, __a, __b, __p)
2149 #define vmulq_m_n_f16(__inactive, __a, __b, __p) __arm_vmulq_m_n_f16(__inactive, __a, __b, __p)
2150 #define vornq_m_f32(__inactive, __a, __b, __p) __arm_vornq_m_f32(__inactive, __a, __b, __p)
2151 #define vornq_m_f16(__inactive, __a, __b, __p) __arm_vornq_m_f16(__inactive, __a, __b, __p)
2152 #define vorrq_m_f32(__inactive, __a, __b, __p) __arm_vorrq_m_f32(__inactive, __a, __b, __p)
2153 #define vorrq_m_f16(__inactive, __a, __b, __p) __arm_vorrq_m_f16(__inactive, __a, __b, __p)
2154 #define vsubq_m_f32(__inactive, __a, __b, __p) __arm_vsubq_m_f32(__inactive, __a, __b, __p)
2155 #define vsubq_m_f16(__inactive, __a, __b, __p) __arm_vsubq_m_f16(__inactive, __a, __b, __p)
2156 #define vsubq_m_n_f32(__inactive, __a, __b, __p) __arm_vsubq_m_n_f32(__inactive, __a, __b, __p)
2157 #define vsubq_m_n_f16(__inactive, __a, __b, __p) __arm_vsubq_m_n_f16(__inactive, __a, __b, __p)
/* Load/store aliases: contiguous (vldr*/vstr*), scatter-store and
   gather-load forms.  Suffixes: _p = predicated store, _z = zeroing
   predicated load, _scatter_base/_gather_base take a vector of
   addresses plus an immediate offset, _offset/_shifted_offset take a
   base pointer plus a vector of (optionally element-scaled) offsets.  */
2158 #define vstrbq_s8( __addr, __value) __arm_vstrbq_s8( __addr, __value)
2159 #define vstrbq_u8( __addr, __value) __arm_vstrbq_u8( __addr, __value)
2160 #define vstrbq_u16( __addr, __value) __arm_vstrbq_u16( __addr, __value)
2161 #define vstrbq_scatter_offset_s8( __base, __offset, __value) __arm_vstrbq_scatter_offset_s8( __base, __offset, __value)
2162 #define vstrbq_scatter_offset_u8( __base, __offset, __value) __arm_vstrbq_scatter_offset_u8( __base, __offset, __value)
2163 #define vstrbq_scatter_offset_u16( __base, __offset, __value) __arm_vstrbq_scatter_offset_u16( __base, __offset, __value)
2164 #define vstrbq_s16( __addr, __value) __arm_vstrbq_s16( __addr, __value)
2165 #define vstrbq_u32( __addr, __value) __arm_vstrbq_u32( __addr, __value)
2166 #define vstrbq_scatter_offset_s16( __base, __offset, __value) __arm_vstrbq_scatter_offset_s16( __base, __offset, __value)
2167 #define vstrbq_scatter_offset_u32( __base, __offset, __value) __arm_vstrbq_scatter_offset_u32( __base, __offset, __value)
2168 #define vstrbq_s32( __addr, __value) __arm_vstrbq_s32( __addr, __value)
2169 #define vstrbq_scatter_offset_s32( __base, __offset, __value) __arm_vstrbq_scatter_offset_s32( __base, __offset, __value)
2170 #define vstrwq_scatter_base_s32(__addr, __offset, __value) __arm_vstrwq_scatter_base_s32(__addr, __offset, __value)
2171 #define vstrwq_scatter_base_u32(__addr, __offset, __value) __arm_vstrwq_scatter_base_u32(__addr, __offset, __value)
2172 #define vldrbq_gather_offset_u8(__base, __offset) __arm_vldrbq_gather_offset_u8(__base, __offset)
2173 #define vldrbq_gather_offset_s8(__base, __offset) __arm_vldrbq_gather_offset_s8(__base, __offset)
2174 #define vldrbq_s8(__base) __arm_vldrbq_s8(__base)
2175 #define vldrbq_u8(__base) __arm_vldrbq_u8(__base)
2176 #define vldrbq_gather_offset_u16(__base, __offset) __arm_vldrbq_gather_offset_u16(__base, __offset)
2177 #define vldrbq_gather_offset_s16(__base, __offset) __arm_vldrbq_gather_offset_s16(__base, __offset)
2178 #define vldrbq_s16(__base) __arm_vldrbq_s16(__base)
2179 #define vldrbq_u16(__base) __arm_vldrbq_u16(__base)
2180 #define vldrbq_gather_offset_u32(__base, __offset) __arm_vldrbq_gather_offset_u32(__base, __offset)
2181 #define vldrbq_gather_offset_s32(__base, __offset) __arm_vldrbq_gather_offset_s32(__base, __offset)
2182 #define vldrbq_s32(__base) __arm_vldrbq_s32(__base)
2183 #define vldrbq_u32(__base) __arm_vldrbq_u32(__base)
2184 #define vldrwq_gather_base_s32(__addr, __offset) __arm_vldrwq_gather_base_s32(__addr, __offset)
2185 #define vldrwq_gather_base_u32(__addr, __offset) __arm_vldrwq_gather_base_u32(__addr, __offset)
2186 #define vstrbq_p_s8( __addr, __value, __p) __arm_vstrbq_p_s8( __addr, __value, __p)
2187 #define vstrbq_p_s32( __addr, __value, __p) __arm_vstrbq_p_s32( __addr, __value, __p)
2188 #define vstrbq_p_s16( __addr, __value, __p) __arm_vstrbq_p_s16( __addr, __value, __p)
2189 #define vstrbq_p_u8( __addr, __value, __p) __arm_vstrbq_p_u8( __addr, __value, __p)
2190 #define vstrbq_p_u32( __addr, __value, __p) __arm_vstrbq_p_u32( __addr, __value, __p)
2191 #define vstrbq_p_u16( __addr, __value, __p) __arm_vstrbq_p_u16( __addr, __value, __p)
2192 #define vstrbq_scatter_offset_p_s8( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s8( __base, __offset, __value, __p)
2193 #define vstrbq_scatter_offset_p_s32( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s32( __base, __offset, __value, __p)
2194 #define vstrbq_scatter_offset_p_s16( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s16( __base, __offset, __value, __p)
2195 #define vstrbq_scatter_offset_p_u8( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u8( __base, __offset, __value, __p)
2196 #define vstrbq_scatter_offset_p_u32( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u32( __base, __offset, __value, __p)
2197 #define vstrbq_scatter_offset_p_u16( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u16( __base, __offset, __value, __p)
2198 #define vstrwq_scatter_base_p_s32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_s32(__addr, __offset, __value, __p)
2199 #define vstrwq_scatter_base_p_u32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_u32(__addr, __offset, __value, __p)
2200 #define vldrbq_gather_offset_z_s16(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s16(__base, __offset, __p)
2201 #define vldrbq_gather_offset_z_u8(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u8(__base, __offset, __p)
2202 #define vldrbq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s32(__base, __offset, __p)
2203 #define vldrbq_gather_offset_z_u16(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u16(__base, __offset, __p)
2204 #define vldrbq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u32(__base, __offset, __p)
2205 #define vldrbq_gather_offset_z_s8(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s8(__base, __offset, __p)
2206 #define vldrbq_z_s16(__base, __p) __arm_vldrbq_z_s16(__base, __p)
2207 #define vldrbq_z_u8(__base, __p) __arm_vldrbq_z_u8(__base, __p)
2208 #define vldrbq_z_s8(__base, __p) __arm_vldrbq_z_s8(__base, __p)
2209 #define vldrbq_z_s32(__base, __p) __arm_vldrbq_z_s32(__base, __p)
2210 #define vldrbq_z_u16(__base, __p) __arm_vldrbq_z_u16(__base, __p)
2211 #define vldrbq_z_u32(__base, __p) __arm_vldrbq_z_u32(__base, __p)
2212 #define vldrwq_gather_base_z_u32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_u32(__addr, __offset, __p)
2213 #define vldrwq_gather_base_z_s32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_s32(__addr, __offset, __p)
2214 #define vld1q_s8(__base) __arm_vld1q_s8(__base)
2215 #define vld1q_s32(__base) __arm_vld1q_s32(__base)
2216 #define vld1q_s16(__base) __arm_vld1q_s16(__base)
2217 #define vld1q_u8(__base) __arm_vld1q_u8(__base)
2218 #define vld1q_u32(__base) __arm_vld1q_u32(__base)
2219 #define vld1q_u16(__base) __arm_vld1q_u16(__base)
2220 #define vldrhq_gather_offset_s32(__base, __offset) __arm_vldrhq_gather_offset_s32(__base, __offset)
2221 #define vldrhq_gather_offset_s16(__base, __offset) __arm_vldrhq_gather_offset_s16(__base, __offset)
2222 #define vldrhq_gather_offset_u32(__base, __offset) __arm_vldrhq_gather_offset_u32(__base, __offset)
2223 #define vldrhq_gather_offset_u16(__base, __offset) __arm_vldrhq_gather_offset_u16(__base, __offset)
2224 #define vldrhq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrhq_gather_offset_z_s32(__base, __offset, __p)
2225 #define vldrhq_gather_offset_z_s16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_s16(__base, __offset, __p)
2226 #define vldrhq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrhq_gather_offset_z_u32(__base, __offset, __p)
2227 #define vldrhq_gather_offset_z_u16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_u16(__base, __offset, __p)
2228 #define vldrhq_gather_shifted_offset_s32(__base, __offset) __arm_vldrhq_gather_shifted_offset_s32(__base, __offset)
2229 #define vldrhq_gather_shifted_offset_s16(__base, __offset) __arm_vldrhq_gather_shifted_offset_s16(__base, __offset)
2230 #define vldrhq_gather_shifted_offset_u32(__base, __offset) __arm_vldrhq_gather_shifted_offset_u32(__base, __offset)
2231 #define vldrhq_gather_shifted_offset_u16(__base, __offset) __arm_vldrhq_gather_shifted_offset_u16(__base, __offset)
2232 #define vldrhq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_s32(__base, __offset, __p)
2233 #define vldrhq_gather_shifted_offset_z_s16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_s16(__base, __offset, __p)
2234 #define vldrhq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_u32(__base, __offset, __p)
2235 #define vldrhq_gather_shifted_offset_z_u16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_u16(__base, __offset, __p)
2236 #define vldrhq_s32(__base) __arm_vldrhq_s32(__base)
2237 #define vldrhq_s16(__base) __arm_vldrhq_s16(__base)
2238 #define vldrhq_u32(__base) __arm_vldrhq_u32(__base)
2239 #define vldrhq_u16(__base) __arm_vldrhq_u16(__base)
2240 #define vldrhq_z_s32(__base, __p) __arm_vldrhq_z_s32(__base, __p)
2241 #define vldrhq_z_s16(__base, __p) __arm_vldrhq_z_s16(__base, __p)
2242 #define vldrhq_z_u32(__base, __p) __arm_vldrhq_z_u32(__base, __p)
2243 #define vldrhq_z_u16(__base, __p) __arm_vldrhq_z_u16(__base, __p)
2244 #define vldrwq_s32(__base) __arm_vldrwq_s32(__base)
2245 #define vldrwq_u32(__base) __arm_vldrwq_u32(__base)
2246 #define vldrwq_z_s32(__base, __p) __arm_vldrwq_z_s32(__base, __p)
2247 #define vldrwq_z_u32(__base, __p) __arm_vldrwq_z_u32(__base, __p)
2248 #define vld1q_f32(__base) __arm_vld1q_f32(__base)
2249 #define vld1q_f16(__base) __arm_vld1q_f16(__base)
2250 #define vldrhq_f16(__base) __arm_vldrhq_f16(__base)
2251 #define vldrhq_z_f16(__base, __p) __arm_vldrhq_z_f16(__base, __p)
2252 #define vldrwq_f32(__base) __arm_vldrwq_f32(__base)
2253 #define vldrwq_z_f32(__base, __p) __arm_vldrwq_z_f32(__base, __p)
2254 #define vldrdq_gather_base_s64(__addr, __offset) __arm_vldrdq_gather_base_s64(__addr, __offset)
2255 #define vldrdq_gather_base_u64(__addr, __offset) __arm_vldrdq_gather_base_u64(__addr, __offset)
2256 #define vldrdq_gather_base_z_s64(__addr, __offset, __p) __arm_vldrdq_gather_base_z_s64(__addr, __offset, __p)
2257 #define vldrdq_gather_base_z_u64(__addr, __offset, __p) __arm_vldrdq_gather_base_z_u64(__addr, __offset, __p)
2258 #define vldrdq_gather_offset_s64(__base, __offset) __arm_vldrdq_gather_offset_s64(__base, __offset)
2259 #define vldrdq_gather_offset_u64(__base, __offset) __arm_vldrdq_gather_offset_u64(__base, __offset)
2260 #define vldrdq_gather_offset_z_s64(__base, __offset, __p) __arm_vldrdq_gather_offset_z_s64(__base, __offset, __p)
2261 #define vldrdq_gather_offset_z_u64(__base, __offset, __p) __arm_vldrdq_gather_offset_z_u64(__base, __offset, __p)
2262 #define vldrdq_gather_shifted_offset_s64(__base, __offset) __arm_vldrdq_gather_shifted_offset_s64(__base, __offset)
2263 #define vldrdq_gather_shifted_offset_u64(__base, __offset) __arm_vldrdq_gather_shifted_offset_u64(__base, __offset)
2264 #define vldrdq_gather_shifted_offset_z_s64(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z_s64(__base, __offset, __p)
2265 #define vldrdq_gather_shifted_offset_z_u64(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z_u64(__base, __offset, __p)
2266 #define vldrhq_gather_offset_f16(__base, __offset) __arm_vldrhq_gather_offset_f16(__base, __offset)
2267 #define vldrhq_gather_offset_z_f16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_f16(__base, __offset, __p)
2268 #define vldrhq_gather_shifted_offset_f16(__base, __offset) __arm_vldrhq_gather_shifted_offset_f16(__base, __offset)
2269 #define vldrhq_gather_shifted_offset_z_f16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_f16(__base, __offset, __p)
2270 #define vldrwq_gather_base_f32(__addr, __offset) __arm_vldrwq_gather_base_f32(__addr, __offset)
2271 #define vldrwq_gather_base_z_f32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_f32(__addr, __offset, __p)
2272 #define vldrwq_gather_offset_f32(__base, __offset) __arm_vldrwq_gather_offset_f32(__base, __offset)
2273 #define vldrwq_gather_offset_s32(__base, __offset) __arm_vldrwq_gather_offset_s32(__base, __offset)
2274 #define vldrwq_gather_offset_u32(__base, __offset) __arm_vldrwq_gather_offset_u32(__base, __offset)
2275 #define vldrwq_gather_offset_z_f32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_f32(__base, __offset, __p)
2276 #define vldrwq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_s32(__base, __offset, __p)
2277 #define vldrwq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_u32(__base, __offset, __p)
2278 #define vldrwq_gather_shifted_offset_f32(__base, __offset) __arm_vldrwq_gather_shifted_offset_f32(__base, __offset)
2279 #define vldrwq_gather_shifted_offset_s32(__base, __offset) __arm_vldrwq_gather_shifted_offset_s32(__base, __offset)
2280 #define vldrwq_gather_shifted_offset_u32(__base, __offset) __arm_vldrwq_gather_shifted_offset_u32(__base, __offset)
2281 #define vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p)
2282 #define vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p)
2283 #define vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p)
/* Store aliases: vst1q (whole-vector store), vstrhq/vstrwq (contiguous
   halfword/word stores, _p = predicated) and vstrdq/vstrhq/vstrwq
   scatter-store variants.  */
2284 #define vst1q_f32(__addr, __value) __arm_vst1q_f32(__addr, __value)
2285 #define vst1q_f16(__addr, __value) __arm_vst1q_f16(__addr, __value)
2286 #define vst1q_s8(__addr, __value) __arm_vst1q_s8(__addr, __value)
2287 #define vst1q_s32(__addr, __value) __arm_vst1q_s32(__addr, __value)
2288 #define vst1q_s16(__addr, __value) __arm_vst1q_s16(__addr, __value)
2289 #define vst1q_u8(__addr, __value) __arm_vst1q_u8(__addr, __value)
2290 #define vst1q_u32(__addr, __value) __arm_vst1q_u32(__addr, __value)
2291 #define vst1q_u16(__addr, __value) __arm_vst1q_u16(__addr, __value)
2292 #define vstrhq_f16(__addr, __value) __arm_vstrhq_f16(__addr, __value)
2293 #define vstrhq_scatter_offset_s32( __base, __offset, __value) __arm_vstrhq_scatter_offset_s32( __base, __offset, __value)
2294 #define vstrhq_scatter_offset_s16( __base, __offset, __value) __arm_vstrhq_scatter_offset_s16( __base, __offset, __value)
2295 #define vstrhq_scatter_offset_u32( __base, __offset, __value) __arm_vstrhq_scatter_offset_u32( __base, __offset, __value)
2296 #define vstrhq_scatter_offset_u16( __base, __offset, __value) __arm_vstrhq_scatter_offset_u16( __base, __offset, __value)
2297 #define vstrhq_scatter_offset_p_s32( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_s32( __base, __offset, __value, __p)
2298 #define vstrhq_scatter_offset_p_s16( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_s16( __base, __offset, __value, __p)
2299 #define vstrhq_scatter_offset_p_u32( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_u32( __base, __offset, __value, __p)
2300 #define vstrhq_scatter_offset_p_u16( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_u16( __base, __offset, __value, __p)
2301 #define vstrhq_scatter_shifted_offset_s32( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_s32( __base, __offset, __value)
2302 #define vstrhq_scatter_shifted_offset_s16( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_s16( __base, __offset, __value)
2303 #define vstrhq_scatter_shifted_offset_u32( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_u32( __base, __offset, __value)
2304 #define vstrhq_scatter_shifted_offset_u16( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_u16( __base, __offset, __value)
2305 #define vstrhq_scatter_shifted_offset_p_s32( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_s32( __base, __offset, __value, __p)
2306 #define vstrhq_scatter_shifted_offset_p_s16( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_s16( __base, __offset, __value, __p)
2307 #define vstrhq_scatter_shifted_offset_p_u32( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_u32( __base, __offset, __value, __p)
2308 #define vstrhq_scatter_shifted_offset_p_u16( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_u16( __base, __offset, __value, __p)
2309 #define vstrhq_s32(__addr, __value) __arm_vstrhq_s32(__addr, __value)
2310 #define vstrhq_s16(__addr, __value) __arm_vstrhq_s16(__addr, __value)
2311 #define vstrhq_u32(__addr, __value) __arm_vstrhq_u32(__addr, __value)
2312 #define vstrhq_u16(__addr, __value) __arm_vstrhq_u16(__addr, __value)
2313 #define vstrhq_p_f16(__addr, __value, __p) __arm_vstrhq_p_f16(__addr, __value, __p)
2314 #define vstrhq_p_s32(__addr, __value, __p) __arm_vstrhq_p_s32(__addr, __value, __p)
2315 #define vstrhq_p_s16(__addr, __value, __p) __arm_vstrhq_p_s16(__addr, __value, __p)
2316 #define vstrhq_p_u32(__addr, __value, __p) __arm_vstrhq_p_u32(__addr, __value, __p)
2317 #define vstrhq_p_u16(__addr, __value, __p) __arm_vstrhq_p_u16(__addr, __value, __p)
2318 #define vstrwq_f32(__addr, __value) __arm_vstrwq_f32(__addr, __value)
2319 #define vstrwq_s32(__addr, __value) __arm_vstrwq_s32(__addr, __value)
2320 #define vstrwq_u32(__addr, __value) __arm_vstrwq_u32(__addr, __value)
2321 #define vstrwq_p_f32(__addr, __value, __p) __arm_vstrwq_p_f32(__addr, __value, __p)
2322 #define vstrwq_p_s32(__addr, __value, __p) __arm_vstrwq_p_s32(__addr, __value, __p)
2323 #define vstrwq_p_u32(__addr, __value, __p) __arm_vstrwq_p_u32(__addr, __value, __p)
2324 #define vstrdq_scatter_base_p_s64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p_s64(__addr, __offset, __value, __p)
2325 #define vstrdq_scatter_base_p_u64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p_u64(__addr, __offset, __value, __p)
2326 #define vstrdq_scatter_base_s64(__addr, __offset, __value) __arm_vstrdq_scatter_base_s64(__addr, __offset, __value)
2327 #define vstrdq_scatter_base_u64(__addr, __offset, __value) __arm_vstrdq_scatter_base_u64(__addr, __offset, __value)
2328 #define vstrdq_scatter_offset_p_s64(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p_s64(__base, __offset, __value, __p)
2329 #define vstrdq_scatter_offset_p_u64(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p_u64(__base, __offset, __value, __p)
2330 #define vstrdq_scatter_offset_s64(__base, __offset, __value) __arm_vstrdq_scatter_offset_s64(__base, __offset, __value)
2331 #define vstrdq_scatter_offset_u64(__base, __offset, __value) __arm_vstrdq_scatter_offset_u64(__base, __offset, __value)
2332 #define vstrdq_scatter_shifted_offset_p_s64(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p_s64(__base, __offset, __value, __p)
2333 #define vstrdq_scatter_shifted_offset_p_u64(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p_u64(__base, __offset, __value, __p)
2334 #define vstrdq_scatter_shifted_offset_s64(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset_s64(__base, __offset, __value)
2335 #define vstrdq_scatter_shifted_offset_u64(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset_u64(__base, __offset, __value)
2336 #define vstrhq_scatter_offset_f16(__base, __offset, __value) __arm_vstrhq_scatter_offset_f16(__base, __offset, __value)
2337 #define vstrhq_scatter_offset_p_f16(__base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_f16(__base, __offset, __value, __p)
2338 #define vstrhq_scatter_shifted_offset_f16(__base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_f16(__base, __offset, __value)
2339 #define vstrhq_scatter_shifted_offset_p_f16(__base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_f16(__base, __offset, __value, __p)
2340 #define vstrwq_scatter_base_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_f32(__addr, __offset, __value)
2341 #define vstrwq_scatter_base_p_f32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_f32(__addr, __offset, __value, __p)
2342 #define vstrwq_scatter_offset_f32(__base, __offset, __value) __arm_vstrwq_scatter_offset_f32(__base, __offset, __value)
2343 #define vstrwq_scatter_offset_p_f32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_f32(__base, __offset, __value, __p)
2344 #define vstrwq_scatter_offset_p_s32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_s32(__base, __offset, __value, __p)
2345 #define vstrwq_scatter_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_u32(__base, __offset, __value, __p)
2346 #define vstrwq_scatter_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_offset_s32(__base, __offset, __value)
2347 #define vstrwq_scatter_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_offset_u32(__base, __offset, __value)
2348 #define vstrwq_scatter_shifted_offset_f32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_f32(__base, __offset, __value)
2349 #define vstrwq_scatter_shifted_offset_p_f32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_f32(__base, __offset, __value, __p)
2350 #define vstrwq_scatter_shifted_offset_p_s32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_s32(__base, __offset, __value, __p)
2351 #define vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p)
2352 #define vstrwq_scatter_shifted_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_s32(__base, __offset, __value)
2353 #define vstrwq_scatter_shifted_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_u32(__base, __offset, __value)
/* Unpredicated vector-add aliases for all element types.  */
2354 #define vaddq_s8(__a, __b) __arm_vaddq_s8(__a, __b)
2355 #define vaddq_s16(__a, __b) __arm_vaddq_s16(__a, __b)
2356 #define vaddq_s32(__a, __b) __arm_vaddq_s32(__a, __b)
2357 #define vaddq_u8(__a, __b) __arm_vaddq_u8(__a, __b)
2358 #define vaddq_u16(__a, __b) __arm_vaddq_u16(__a, __b)
2359 #define vaddq_u32(__a, __b) __arm_vaddq_u32(__a, __b)
2360 #define vaddq_f16(__a, __b) __arm_vaddq_f16(__a, __b)
2361 #define vaddq_f32(__a, __b) __arm_vaddq_f32(__a, __b)
/* vreinterpretq aliases: bit-pattern-preserving casts between vector
   types.  Naming is vreinterpretq_<to>_<from>(__a).  */
2362 #define vreinterpretq_s16_s32(__a) __arm_vreinterpretq_s16_s32(__a)
2363 #define vreinterpretq_s16_s64(__a) __arm_vreinterpretq_s16_s64(__a)
2364 #define vreinterpretq_s16_s8(__a) __arm_vreinterpretq_s16_s8(__a)
2365 #define vreinterpretq_s16_u16(__a) __arm_vreinterpretq_s16_u16(__a)
2366 #define vreinterpretq_s16_u32(__a) __arm_vreinterpretq_s16_u32(__a)
2367 #define vreinterpretq_s16_u64(__a) __arm_vreinterpretq_s16_u64(__a)
2368 #define vreinterpretq_s16_u8(__a) __arm_vreinterpretq_s16_u8(__a)
2369 #define vreinterpretq_s32_s16(__a) __arm_vreinterpretq_s32_s16(__a)
2370 #define vreinterpretq_s32_s64(__a) __arm_vreinterpretq_s32_s64(__a)
2371 #define vreinterpretq_s32_s8(__a) __arm_vreinterpretq_s32_s8(__a)
2372 #define vreinterpretq_s32_u16(__a) __arm_vreinterpretq_s32_u16(__a)
2373 #define vreinterpretq_s32_u32(__a) __arm_vreinterpretq_s32_u32(__a)
2374 #define vreinterpretq_s32_u64(__a) __arm_vreinterpretq_s32_u64(__a)
2375 #define vreinterpretq_s32_u8(__a) __arm_vreinterpretq_s32_u8(__a)
2376 #define vreinterpretq_s64_s16(__a) __arm_vreinterpretq_s64_s16(__a)
2377 #define vreinterpretq_s64_s32(__a) __arm_vreinterpretq_s64_s32(__a)
2378 #define vreinterpretq_s64_s8(__a) __arm_vreinterpretq_s64_s8(__a)
2379 #define vreinterpretq_s64_u16(__a) __arm_vreinterpretq_s64_u16(__a)
2380 #define vreinterpretq_s64_u32(__a) __arm_vreinterpretq_s64_u32(__a)
2381 #define vreinterpretq_s64_u64(__a) __arm_vreinterpretq_s64_u64(__a)
2382 #define vreinterpretq_s64_u8(__a) __arm_vreinterpretq_s64_u8(__a)
2383 #define vreinterpretq_s8_s16(__a) __arm_vreinterpretq_s8_s16(__a)
2384 #define vreinterpretq_s8_s32(__a) __arm_vreinterpretq_s8_s32(__a)
2385 #define vreinterpretq_s8_s64(__a) __arm_vreinterpretq_s8_s64(__a)
2386 #define vreinterpretq_s8_u16(__a) __arm_vreinterpretq_s8_u16(__a)
2387 #define vreinterpretq_s8_u32(__a) __arm_vreinterpretq_s8_u32(__a)
2388 #define vreinterpretq_s8_u64(__a) __arm_vreinterpretq_s8_u64(__a)
2389 #define vreinterpretq_s8_u8(__a) __arm_vreinterpretq_s8_u8(__a)
2390 #define vreinterpretq_u16_s16(__a) __arm_vreinterpretq_u16_s16(__a)
2391 #define vreinterpretq_u16_s32(__a) __arm_vreinterpretq_u16_s32(__a)
2392 #define vreinterpretq_u16_s64(__a) __arm_vreinterpretq_u16_s64(__a)
2393 #define vreinterpretq_u16_s8(__a) __arm_vreinterpretq_u16_s8(__a)
2394 #define vreinterpretq_u16_u32(__a) __arm_vreinterpretq_u16_u32(__a)
2395 #define vreinterpretq_u16_u64(__a) __arm_vreinterpretq_u16_u64(__a)
2396 #define vreinterpretq_u16_u8(__a) __arm_vreinterpretq_u16_u8(__a)
2397 #define vreinterpretq_u32_s16(__a) __arm_vreinterpretq_u32_s16(__a)
2398 #define vreinterpretq_u32_s32(__a) __arm_vreinterpretq_u32_s32(__a)
2399 #define vreinterpretq_u32_s64(__a) __arm_vreinterpretq_u32_s64(__a)
2400 #define vreinterpretq_u32_s8(__a) __arm_vreinterpretq_u32_s8(__a)
2401 #define vreinterpretq_u32_u16(__a) __arm_vreinterpretq_u32_u16(__a)
2402 #define vreinterpretq_u32_u64(__a) __arm_vreinterpretq_u32_u64(__a)
2403 #define vreinterpretq_u32_u8(__a) __arm_vreinterpretq_u32_u8(__a)
2404 #define vreinterpretq_u64_s16(__a) __arm_vreinterpretq_u64_s16(__a)
2405 #define vreinterpretq_u64_s32(__a) __arm_vreinterpretq_u64_s32(__a)
2406 #define vreinterpretq_u64_s64(__a) __arm_vreinterpretq_u64_s64(__a)
2407 #define vreinterpretq_u64_s8(__a) __arm_vreinterpretq_u64_s8(__a)
2408 #define vreinterpretq_u64_u16(__a) __arm_vreinterpretq_u64_u16(__a)
2409 #define vreinterpretq_u64_u32(__a) __arm_vreinterpretq_u64_u32(__a)
2410 #define vreinterpretq_u64_u8(__a) __arm_vreinterpretq_u64_u8(__a)
2411 #define vreinterpretq_u8_s16(__a) __arm_vreinterpretq_u8_s16(__a)
2412 #define vreinterpretq_u8_s32(__a) __arm_vreinterpretq_u8_s32(__a)
2413 #define vreinterpretq_u8_s64(__a) __arm_vreinterpretq_u8_s64(__a)
2414 #define vreinterpretq_u8_s8(__a) __arm_vreinterpretq_u8_s8(__a)
2415 #define vreinterpretq_u8_u16(__a) __arm_vreinterpretq_u8_u16(__a)
2416 #define vreinterpretq_u8_u32(__a) __arm_vreinterpretq_u8_u32(__a)
2417 #define vreinterpretq_u8_u64(__a) __arm_vreinterpretq_u8_u64(__a)
2418 #define vreinterpretq_s32_f16(__a) __arm_vreinterpretq_s32_f16(__a)
2419 #define vreinterpretq_s32_f32(__a) __arm_vreinterpretq_s32_f32(__a)
2420 #define vreinterpretq_u16_f16(__a) __arm_vreinterpretq_u16_f16(__a)
2421 #define vreinterpretq_u16_f32(__a) __arm_vreinterpretq_u16_f32(__a)
2422 #define vreinterpretq_u32_f16(__a) __arm_vreinterpretq_u32_f16(__a)
2423 #define vreinterpretq_u32_f32(__a) __arm_vreinterpretq_u32_f32(__a)
2424 #define vreinterpretq_u64_f16(__a) __arm_vreinterpretq_u64_f16(__a)
2425 #define vreinterpretq_u64_f32(__a) __arm_vreinterpretq_u64_f32(__a)
2426 #define vreinterpretq_u8_f16(__a) __arm_vreinterpretq_u8_f16(__a)
2427 #define vreinterpretq_u8_f32(__a) __arm_vreinterpretq_u8_f32(__a)
2428 #define vreinterpretq_f16_f32(__a) __arm_vreinterpretq_f16_f32(__a)
2429 #define vreinterpretq_f16_s16(__a) __arm_vreinterpretq_f16_s16(__a)
2430 #define vreinterpretq_f16_s32(__a) __arm_vreinterpretq_f16_s32(__a)
2431 #define vreinterpretq_f16_s64(__a) __arm_vreinterpretq_f16_s64(__a)
2432 #define vreinterpretq_f16_s8(__a) __arm_vreinterpretq_f16_s8(__a)
2433 #define vreinterpretq_f16_u16(__a) __arm_vreinterpretq_f16_u16(__a)
2434 #define vreinterpretq_f16_u32(__a) __arm_vreinterpretq_f16_u32(__a)
2435 #define vreinterpretq_f16_u64(__a) __arm_vreinterpretq_f16_u64(__a)
2436 #define vreinterpretq_f16_u8(__a) __arm_vreinterpretq_f16_u8(__a)
2437 #define vreinterpretq_f32_f16(__a) __arm_vreinterpretq_f32_f16(__a)
2438 #define vreinterpretq_f32_s16(__a) __arm_vreinterpretq_f32_s16(__a)
2439 #define vreinterpretq_f32_s32(__a) __arm_vreinterpretq_f32_s32(__a)
2440 #define vreinterpretq_f32_s64(__a) __arm_vreinterpretq_f32_s64(__a)
2441 #define vreinterpretq_f32_s8(__a) __arm_vreinterpretq_f32_s8(__a)
2442 #define vreinterpretq_f32_u16(__a) __arm_vreinterpretq_f32_u16(__a)
2443 #define vreinterpretq_f32_u32(__a) __arm_vreinterpretq_f32_u32(__a)
2444 #define vreinterpretq_f32_u64(__a) __arm_vreinterpretq_f32_u64(__a)
2445 #define vreinterpretq_f32_u8(__a) __arm_vreinterpretq_f32_u8(__a)
2446 #define vreinterpretq_s16_f16(__a) __arm_vreinterpretq_s16_f16(__a)
2447 #define vreinterpretq_s16_f32(__a) __arm_vreinterpretq_s16_f32(__a)
2448 #define vreinterpretq_s64_f16(__a) __arm_vreinterpretq_s64_f16(__a)
2449 #define vreinterpretq_s64_f32(__a) __arm_vreinterpretq_s64_f32(__a)
2450 #define vreinterpretq_s8_f16(__a) __arm_vreinterpretq_s8_f16(__a)
2451 #define vreinterpretq_s8_f32(__a) __arm_vreinterpretq_s8_f32(__a)
/* vuninitializedq intrinsics return a vector with unspecified contents and
   take no arguments.  Use an empty macro parameter list rather than abusing
   the keyword 'void' as a macro parameter name: the only valid call,
   e.g. vuninitializedq_u8(), expands identically either way.  */
#define vuninitializedq_u8() __arm_vuninitializedq_u8()
#define vuninitializedq_u16() __arm_vuninitializedq_u16()
#define vuninitializedq_u32() __arm_vuninitializedq_u32()
#define vuninitializedq_u64() __arm_vuninitializedq_u64()
#define vuninitializedq_s8() __arm_vuninitializedq_s8()
#define vuninitializedq_s16() __arm_vuninitializedq_s16()
#define vuninitializedq_s32() __arm_vuninitializedq_s32()
#define vuninitializedq_s64() __arm_vuninitializedq_s64()
#define vuninitializedq_f16() __arm_vuninitializedq_f16()
#define vuninitializedq_f32() __arm_vuninitializedq_f32()
/* Decrementing/incrementing (d/i) and wrapping (dw/iw) vector duplicate
   intrinsics: _m_ = merging-predicated, _n_ = scalar start, _wb_ = scalar
   start with write-back through a pointer.  */
#define vddupq_m_n_u8(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u8(__inactive, __a, __imm, __p)
#define vddupq_m_n_u32(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u32(__inactive, __a, __imm, __p)
#define vddupq_m_n_u16(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u16(__inactive, __a, __imm, __p)
#define vddupq_m_wb_u8(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u8(__inactive, __a, __imm, __p)
#define vddupq_m_wb_u16(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u16(__inactive, __a, __imm, __p)
#define vddupq_m_wb_u32(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u32(__inactive, __a, __imm, __p)
#define vddupq_n_u8(__a, __imm) __arm_vddupq_n_u8(__a, __imm)
#define vddupq_n_u32(__a, __imm) __arm_vddupq_n_u32(__a, __imm)
#define vddupq_n_u16(__a, __imm) __arm_vddupq_n_u16(__a, __imm)
#define vddupq_wb_u8( __a, __imm) __arm_vddupq_wb_u8( __a, __imm)
#define vddupq_wb_u16( __a, __imm) __arm_vddupq_wb_u16( __a, __imm)
#define vddupq_wb_u32( __a, __imm) __arm_vddupq_wb_u32( __a, __imm)
#define vdwdupq_m_n_u8(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u8(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_n_u32(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u32(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_n_u16(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u16(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p)
#define vdwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p)
#define vdwdupq_n_u8(__a, __b, __imm) __arm_vdwdupq_n_u8(__a, __b, __imm)
#define vdwdupq_n_u32(__a, __b, __imm) __arm_vdwdupq_n_u32(__a, __b, __imm)
#define vdwdupq_n_u16(__a, __b, __imm) __arm_vdwdupq_n_u16(__a, __b, __imm)
#define vdwdupq_wb_u8( __a, __b, __imm) __arm_vdwdupq_wb_u8( __a, __b, __imm)
#define vdwdupq_wb_u32( __a, __b, __imm) __arm_vdwdupq_wb_u32( __a, __b, __imm)
#define vdwdupq_wb_u16( __a, __b, __imm) __arm_vdwdupq_wb_u16( __a, __b, __imm)
#define vidupq_m_n_u8(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u8(__inactive, __a, __imm, __p)
#define vidupq_m_n_u32(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u32(__inactive, __a, __imm, __p)
#define vidupq_m_n_u16(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u16(__inactive, __a, __imm, __p)
#define vidupq_m_wb_u8(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u8(__inactive, __a, __imm, __p)
#define vidupq_m_wb_u16(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u16(__inactive, __a, __imm, __p)
#define vidupq_m_wb_u32(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u32(__inactive, __a, __imm, __p)
#define vidupq_n_u8(__a, __imm) __arm_vidupq_n_u8(__a, __imm)
#define vidupq_n_u32(__a, __imm) __arm_vidupq_n_u32(__a, __imm)
#define vidupq_n_u16(__a, __imm) __arm_vidupq_n_u16(__a, __imm)
#define vidupq_wb_u8( __a, __imm) __arm_vidupq_wb_u8( __a, __imm)
#define vidupq_wb_u16( __a, __imm) __arm_vidupq_wb_u16( __a, __imm)
#define vidupq_wb_u32( __a, __imm) __arm_vidupq_wb_u32( __a, __imm)
#define viwdupq_m_n_u8(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u8(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_n_u32(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u32(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_n_u16(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u16(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p)
#define viwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p)
#define viwdupq_n_u8(__a, __b, __imm) __arm_viwdupq_n_u8(__a, __b, __imm)
#define viwdupq_n_u32(__a, __b, __imm) __arm_viwdupq_n_u32(__a, __b, __imm)
#define viwdupq_n_u16(__a, __b, __imm) __arm_viwdupq_n_u16(__a, __b, __imm)
#define viwdupq_wb_u8( __a, __b, __imm) __arm_viwdupq_wb_u8( __a, __b, __imm)
#define viwdupq_wb_u32( __a, __b, __imm) __arm_viwdupq_wb_u32( __a, __b, __imm)
#define viwdupq_wb_u16( __a, __b, __imm) __arm_viwdupq_wb_u16( __a, __b, __imm)
/* Vector-base gather loads and scatter stores with write-back (_wb_) of the
   base register; _z_/_p_ variants are zeroing/predicated forms.  */
#define vldrdq_gather_base_wb_s64(__addr, __offset) __arm_vldrdq_gather_base_wb_s64(__addr, __offset)
#define vldrdq_gather_base_wb_u64(__addr, __offset) __arm_vldrdq_gather_base_wb_u64(__addr, __offset)
#define vldrdq_gather_base_wb_z_s64(__addr, __offset, __p) __arm_vldrdq_gather_base_wb_z_s64(__addr, __offset, __p)
#define vldrdq_gather_base_wb_z_u64(__addr, __offset, __p) __arm_vldrdq_gather_base_wb_z_u64(__addr, __offset, __p)
#define vldrwq_gather_base_wb_f32(__addr, __offset) __arm_vldrwq_gather_base_wb_f32(__addr, __offset)
#define vldrwq_gather_base_wb_s32(__addr, __offset) __arm_vldrwq_gather_base_wb_s32(__addr, __offset)
#define vldrwq_gather_base_wb_u32(__addr, __offset) __arm_vldrwq_gather_base_wb_u32(__addr, __offset)
#define vldrwq_gather_base_wb_z_f32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_f32(__addr, __offset, __p)
#define vldrwq_gather_base_wb_z_s32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_s32(__addr, __offset, __p)
#define vldrwq_gather_base_wb_z_u32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_u32(__addr, __offset, __p)
#define vstrdq_scatter_base_wb_p_s64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p_s64(__addr, __offset, __value, __p)
#define vstrdq_scatter_base_wb_p_u64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p_u64(__addr, __offset, __value, __p)
#define vstrdq_scatter_base_wb_s64(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb_s64(__addr, __offset, __value)
#define vstrdq_scatter_base_wb_u64(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb_u64(__addr, __offset, __value)
#define vstrwq_scatter_base_wb_p_s32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_s32(__addr, __offset, __value, __p)
#define vstrwq_scatter_base_wb_p_f32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_f32(__addr, __offset, __value, __p)
#define vstrwq_scatter_base_wb_p_u32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_u32(__addr, __offset, __value, __p)
#define vstrwq_scatter_base_wb_s32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_s32(__addr, __offset, __value)
#define vstrwq_scatter_base_wb_u32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_u32(__addr, __offset, __value)
#define vstrwq_scatter_base_wb_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_f32(__addr, __offset, __value)
/* Predicated-with-undefined-inactive-lanes (_x_) forms of the
   incrementing/decrementing/wrapping duplicate intrinsics.  */
#define vddupq_x_n_u8(__a, __imm, __p) __arm_vddupq_x_n_u8(__a, __imm, __p)
#define vddupq_x_n_u16(__a, __imm, __p) __arm_vddupq_x_n_u16(__a, __imm, __p)
#define vddupq_x_n_u32(__a, __imm, __p) __arm_vddupq_x_n_u32(__a, __imm, __p)
#define vddupq_x_wb_u8(__a, __imm, __p) __arm_vddupq_x_wb_u8(__a, __imm, __p)
#define vddupq_x_wb_u16(__a, __imm, __p) __arm_vddupq_x_wb_u16(__a, __imm, __p)
#define vddupq_x_wb_u32(__a, __imm, __p) __arm_vddupq_x_wb_u32(__a, __imm, __p)
#define vdwdupq_x_n_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u8(__a, __b, __imm, __p)
#define vdwdupq_x_n_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u16(__a, __b, __imm, __p)
#define vdwdupq_x_n_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u32(__a, __b, __imm, __p)
#define vdwdupq_x_wb_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u8(__a, __b, __imm, __p)
#define vdwdupq_x_wb_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u16(__a, __b, __imm, __p)
#define vdwdupq_x_wb_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u32(__a, __b, __imm, __p)
#define vidupq_x_n_u8(__a, __imm, __p) __arm_vidupq_x_n_u8(__a, __imm, __p)
#define vidupq_x_n_u16(__a, __imm, __p) __arm_vidupq_x_n_u16(__a, __imm, __p)
#define vidupq_x_n_u32(__a, __imm, __p) __arm_vidupq_x_n_u32(__a, __imm, __p)
#define vidupq_x_wb_u8(__a, __imm, __p) __arm_vidupq_x_wb_u8(__a, __imm, __p)
#define vidupq_x_wb_u16(__a, __imm, __p) __arm_vidupq_x_wb_u16(__a, __imm, __p)
#define vidupq_x_wb_u32(__a, __imm, __p) __arm_vidupq_x_wb_u32(__a, __imm, __p)
#define viwdupq_x_n_u8(__a, __b, __imm, __p) __arm_viwdupq_x_n_u8(__a, __b, __imm, __p)
#define viwdupq_x_n_u16(__a, __b, __imm, __p) __arm_viwdupq_x_n_u16(__a, __b, __imm, __p)
#define viwdupq_x_n_u32(__a, __b, __imm, __p) __arm_viwdupq_x_n_u32(__a, __b, __imm, __p)
#define viwdupq_x_wb_u8(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u8(__a, __b, __imm, __p)
#define viwdupq_x_wb_u16(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u16(__a, __b, __imm, __p)
#define viwdupq_x_wb_u32(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u32(__a, __b, __imm, __p)
/* Predicated-with-undefined-inactive-lanes (_x_) integer operations:
   arithmetic, min/max, multiplies, shifts, bitwise logic and reversals.  */
#define vdupq_x_n_s8(__a, __p) __arm_vdupq_x_n_s8(__a, __p)
#define vdupq_x_n_s16(__a, __p) __arm_vdupq_x_n_s16(__a, __p)
#define vdupq_x_n_s32(__a, __p) __arm_vdupq_x_n_s32(__a, __p)
#define vdupq_x_n_u8(__a, __p) __arm_vdupq_x_n_u8(__a, __p)
#define vdupq_x_n_u16(__a, __p) __arm_vdupq_x_n_u16(__a, __p)
#define vdupq_x_n_u32(__a, __p) __arm_vdupq_x_n_u32(__a, __p)
#define vminq_x_s8(__a, __b, __p) __arm_vminq_x_s8(__a, __b, __p)
#define vminq_x_s16(__a, __b, __p) __arm_vminq_x_s16(__a, __b, __p)
#define vminq_x_s32(__a, __b, __p) __arm_vminq_x_s32(__a, __b, __p)
#define vminq_x_u8(__a, __b, __p) __arm_vminq_x_u8(__a, __b, __p)
#define vminq_x_u16(__a, __b, __p) __arm_vminq_x_u16(__a, __b, __p)
#define vminq_x_u32(__a, __b, __p) __arm_vminq_x_u32(__a, __b, __p)
#define vmaxq_x_s8(__a, __b, __p) __arm_vmaxq_x_s8(__a, __b, __p)
#define vmaxq_x_s16(__a, __b, __p) __arm_vmaxq_x_s16(__a, __b, __p)
#define vmaxq_x_s32(__a, __b, __p) __arm_vmaxq_x_s32(__a, __b, __p)
#define vmaxq_x_u8(__a, __b, __p) __arm_vmaxq_x_u8(__a, __b, __p)
#define vmaxq_x_u16(__a, __b, __p) __arm_vmaxq_x_u16(__a, __b, __p)
#define vmaxq_x_u32(__a, __b, __p) __arm_vmaxq_x_u32(__a, __b, __p)
#define vabdq_x_s8(__a, __b, __p) __arm_vabdq_x_s8(__a, __b, __p)
#define vabdq_x_s16(__a, __b, __p) __arm_vabdq_x_s16(__a, __b, __p)
#define vabdq_x_s32(__a, __b, __p) __arm_vabdq_x_s32(__a, __b, __p)
#define vabdq_x_u8(__a, __b, __p) __arm_vabdq_x_u8(__a, __b, __p)
#define vabdq_x_u16(__a, __b, __p) __arm_vabdq_x_u16(__a, __b, __p)
#define vabdq_x_u32(__a, __b, __p) __arm_vabdq_x_u32(__a, __b, __p)
#define vabsq_x_s8(__a, __p) __arm_vabsq_x_s8(__a, __p)
#define vabsq_x_s16(__a, __p) __arm_vabsq_x_s16(__a, __p)
#define vabsq_x_s32(__a, __p) __arm_vabsq_x_s32(__a, __p)
#define vaddq_x_s8(__a, __b, __p) __arm_vaddq_x_s8(__a, __b, __p)
#define vaddq_x_s16(__a, __b, __p) __arm_vaddq_x_s16(__a, __b, __p)
#define vaddq_x_s32(__a, __b, __p) __arm_vaddq_x_s32(__a, __b, __p)
#define vaddq_x_n_s8(__a, __b, __p) __arm_vaddq_x_n_s8(__a, __b, __p)
#define vaddq_x_n_s16(__a, __b, __p) __arm_vaddq_x_n_s16(__a, __b, __p)
#define vaddq_x_n_s32(__a, __b, __p) __arm_vaddq_x_n_s32(__a, __b, __p)
#define vaddq_x_u8(__a, __b, __p) __arm_vaddq_x_u8(__a, __b, __p)
#define vaddq_x_u16(__a, __b, __p) __arm_vaddq_x_u16(__a, __b, __p)
#define vaddq_x_u32(__a, __b, __p) __arm_vaddq_x_u32(__a, __b, __p)
#define vaddq_x_n_u8(__a, __b, __p) __arm_vaddq_x_n_u8(__a, __b, __p)
#define vaddq_x_n_u16(__a, __b, __p) __arm_vaddq_x_n_u16(__a, __b, __p)
#define vaddq_x_n_u32(__a, __b, __p) __arm_vaddq_x_n_u32(__a, __b, __p)
#define vclsq_x_s8(__a, __p) __arm_vclsq_x_s8(__a, __p)
#define vclsq_x_s16(__a, __p) __arm_vclsq_x_s16(__a, __p)
#define vclsq_x_s32(__a, __p) __arm_vclsq_x_s32(__a, __p)
#define vclzq_x_s8(__a, __p) __arm_vclzq_x_s8(__a, __p)
#define vclzq_x_s16(__a, __p) __arm_vclzq_x_s16(__a, __p)
#define vclzq_x_s32(__a, __p) __arm_vclzq_x_s32(__a, __p)
#define vclzq_x_u8(__a, __p) __arm_vclzq_x_u8(__a, __p)
#define vclzq_x_u16(__a, __p) __arm_vclzq_x_u16(__a, __p)
#define vclzq_x_u32(__a, __p) __arm_vclzq_x_u32(__a, __p)
#define vnegq_x_s8(__a, __p) __arm_vnegq_x_s8(__a, __p)
#define vnegq_x_s16(__a, __p) __arm_vnegq_x_s16(__a, __p)
#define vnegq_x_s32(__a, __p) __arm_vnegq_x_s32(__a, __p)
#define vmulhq_x_s8(__a, __b, __p) __arm_vmulhq_x_s8(__a, __b, __p)
#define vmulhq_x_s16(__a, __b, __p) __arm_vmulhq_x_s16(__a, __b, __p)
#define vmulhq_x_s32(__a, __b, __p) __arm_vmulhq_x_s32(__a, __b, __p)
#define vmulhq_x_u8(__a, __b, __p) __arm_vmulhq_x_u8(__a, __b, __p)
#define vmulhq_x_u16(__a, __b, __p) __arm_vmulhq_x_u16(__a, __b, __p)
#define vmulhq_x_u32(__a, __b, __p) __arm_vmulhq_x_u32(__a, __b, __p)
#define vmullbq_poly_x_p8(__a, __b, __p) __arm_vmullbq_poly_x_p8(__a, __b, __p)
#define vmullbq_poly_x_p16(__a, __b, __p) __arm_vmullbq_poly_x_p16(__a, __b, __p)
#define vmullbq_int_x_s8(__a, __b, __p) __arm_vmullbq_int_x_s8(__a, __b, __p)
#define vmullbq_int_x_s16(__a, __b, __p) __arm_vmullbq_int_x_s16(__a, __b, __p)
#define vmullbq_int_x_s32(__a, __b, __p) __arm_vmullbq_int_x_s32(__a, __b, __p)
#define vmullbq_int_x_u8(__a, __b, __p) __arm_vmullbq_int_x_u8(__a, __b, __p)
#define vmullbq_int_x_u16(__a, __b, __p) __arm_vmullbq_int_x_u16(__a, __b, __p)
#define vmullbq_int_x_u32(__a, __b, __p) __arm_vmullbq_int_x_u32(__a, __b, __p)
#define vmulltq_poly_x_p8(__a, __b, __p) __arm_vmulltq_poly_x_p8(__a, __b, __p)
#define vmulltq_poly_x_p16(__a, __b, __p) __arm_vmulltq_poly_x_p16(__a, __b, __p)
#define vmulltq_int_x_s8(__a, __b, __p) __arm_vmulltq_int_x_s8(__a, __b, __p)
#define vmulltq_int_x_s16(__a, __b, __p) __arm_vmulltq_int_x_s16(__a, __b, __p)
#define vmulltq_int_x_s32(__a, __b, __p) __arm_vmulltq_int_x_s32(__a, __b, __p)
#define vmulltq_int_x_u8(__a, __b, __p) __arm_vmulltq_int_x_u8(__a, __b, __p)
#define vmulltq_int_x_u16(__a, __b, __p) __arm_vmulltq_int_x_u16(__a, __b, __p)
#define vmulltq_int_x_u32(__a, __b, __p) __arm_vmulltq_int_x_u32(__a, __b, __p)
#define vmulq_x_s8(__a, __b, __p) __arm_vmulq_x_s8(__a, __b, __p)
#define vmulq_x_s16(__a, __b, __p) __arm_vmulq_x_s16(__a, __b, __p)
#define vmulq_x_s32(__a, __b, __p) __arm_vmulq_x_s32(__a, __b, __p)
#define vmulq_x_n_s8(__a, __b, __p) __arm_vmulq_x_n_s8(__a, __b, __p)
#define vmulq_x_n_s16(__a, __b, __p) __arm_vmulq_x_n_s16(__a, __b, __p)
#define vmulq_x_n_s32(__a, __b, __p) __arm_vmulq_x_n_s32(__a, __b, __p)
#define vmulq_x_u8(__a, __b, __p) __arm_vmulq_x_u8(__a, __b, __p)
#define vmulq_x_u16(__a, __b, __p) __arm_vmulq_x_u16(__a, __b, __p)
#define vmulq_x_u32(__a, __b, __p) __arm_vmulq_x_u32(__a, __b, __p)
#define vmulq_x_n_u8(__a, __b, __p) __arm_vmulq_x_n_u8(__a, __b, __p)
#define vmulq_x_n_u16(__a, __b, __p) __arm_vmulq_x_n_u16(__a, __b, __p)
#define vmulq_x_n_u32(__a, __b, __p) __arm_vmulq_x_n_u32(__a, __b, __p)
#define vsubq_x_s8(__a, __b, __p) __arm_vsubq_x_s8(__a, __b, __p)
#define vsubq_x_s16(__a, __b, __p) __arm_vsubq_x_s16(__a, __b, __p)
#define vsubq_x_s32(__a, __b, __p) __arm_vsubq_x_s32(__a, __b, __p)
#define vsubq_x_n_s8(__a, __b, __p) __arm_vsubq_x_n_s8(__a, __b, __p)
#define vsubq_x_n_s16(__a, __b, __p) __arm_vsubq_x_n_s16(__a, __b, __p)
#define vsubq_x_n_s32(__a, __b, __p) __arm_vsubq_x_n_s32(__a, __b, __p)
#define vsubq_x_u8(__a, __b, __p) __arm_vsubq_x_u8(__a, __b, __p)
#define vsubq_x_u16(__a, __b, __p) __arm_vsubq_x_u16(__a, __b, __p)
#define vsubq_x_u32(__a, __b, __p) __arm_vsubq_x_u32(__a, __b, __p)
#define vsubq_x_n_u8(__a, __b, __p) __arm_vsubq_x_n_u8(__a, __b, __p)
#define vsubq_x_n_u16(__a, __b, __p) __arm_vsubq_x_n_u16(__a, __b, __p)
#define vsubq_x_n_u32(__a, __b, __p) __arm_vsubq_x_n_u32(__a, __b, __p)
#define vcaddq_rot90_x_s8(__a, __b, __p) __arm_vcaddq_rot90_x_s8(__a, __b, __p)
#define vcaddq_rot90_x_s16(__a, __b, __p) __arm_vcaddq_rot90_x_s16(__a, __b, __p)
#define vcaddq_rot90_x_s32(__a, __b, __p) __arm_vcaddq_rot90_x_s32(__a, __b, __p)
#define vcaddq_rot90_x_u8(__a, __b, __p) __arm_vcaddq_rot90_x_u8(__a, __b, __p)
#define vcaddq_rot90_x_u16(__a, __b, __p) __arm_vcaddq_rot90_x_u16(__a, __b, __p)
#define vcaddq_rot90_x_u32(__a, __b, __p) __arm_vcaddq_rot90_x_u32(__a, __b, __p)
#define vcaddq_rot270_x_s8(__a, __b, __p) __arm_vcaddq_rot270_x_s8(__a, __b, __p)
#define vcaddq_rot270_x_s16(__a, __b, __p) __arm_vcaddq_rot270_x_s16(__a, __b, __p)
#define vcaddq_rot270_x_s32(__a, __b, __p) __arm_vcaddq_rot270_x_s32(__a, __b, __p)
#define vcaddq_rot270_x_u8(__a, __b, __p) __arm_vcaddq_rot270_x_u8(__a, __b, __p)
#define vcaddq_rot270_x_u16(__a, __b, __p) __arm_vcaddq_rot270_x_u16(__a, __b, __p)
#define vcaddq_rot270_x_u32(__a, __b, __p) __arm_vcaddq_rot270_x_u32(__a, __b, __p)
#define vhaddq_x_n_s8(__a, __b, __p) __arm_vhaddq_x_n_s8(__a, __b, __p)
#define vhaddq_x_n_s16(__a, __b, __p) __arm_vhaddq_x_n_s16(__a, __b, __p)
#define vhaddq_x_n_s32(__a, __b, __p) __arm_vhaddq_x_n_s32(__a, __b, __p)
#define vhaddq_x_n_u8(__a, __b, __p) __arm_vhaddq_x_n_u8(__a, __b, __p)
#define vhaddq_x_n_u16(__a, __b, __p) __arm_vhaddq_x_n_u16(__a, __b, __p)
#define vhaddq_x_n_u32(__a, __b, __p) __arm_vhaddq_x_n_u32(__a, __b, __p)
#define vhaddq_x_s8(__a, __b, __p) __arm_vhaddq_x_s8(__a, __b, __p)
#define vhaddq_x_s16(__a, __b, __p) __arm_vhaddq_x_s16(__a, __b, __p)
#define vhaddq_x_s32(__a, __b, __p) __arm_vhaddq_x_s32(__a, __b, __p)
#define vhaddq_x_u8(__a, __b, __p) __arm_vhaddq_x_u8(__a, __b, __p)
#define vhaddq_x_u16(__a, __b, __p) __arm_vhaddq_x_u16(__a, __b, __p)
#define vhaddq_x_u32(__a, __b, __p) __arm_vhaddq_x_u32(__a, __b, __p)
#define vhcaddq_rot90_x_s8(__a, __b, __p) __arm_vhcaddq_rot90_x_s8(__a, __b, __p)
#define vhcaddq_rot90_x_s16(__a, __b, __p) __arm_vhcaddq_rot90_x_s16(__a, __b, __p)
#define vhcaddq_rot90_x_s32(__a, __b, __p) __arm_vhcaddq_rot90_x_s32(__a, __b, __p)
#define vhcaddq_rot270_x_s8(__a, __b, __p) __arm_vhcaddq_rot270_x_s8(__a, __b, __p)
#define vhcaddq_rot270_x_s16(__a, __b, __p) __arm_vhcaddq_rot270_x_s16(__a, __b, __p)
#define vhcaddq_rot270_x_s32(__a, __b, __p) __arm_vhcaddq_rot270_x_s32(__a, __b, __p)
#define vhsubq_x_n_s8(__a, __b, __p) __arm_vhsubq_x_n_s8(__a, __b, __p)
#define vhsubq_x_n_s16(__a, __b, __p) __arm_vhsubq_x_n_s16(__a, __b, __p)
#define vhsubq_x_n_s32(__a, __b, __p) __arm_vhsubq_x_n_s32(__a, __b, __p)
#define vhsubq_x_n_u8(__a, __b, __p) __arm_vhsubq_x_n_u8(__a, __b, __p)
#define vhsubq_x_n_u16(__a, __b, __p) __arm_vhsubq_x_n_u16(__a, __b, __p)
#define vhsubq_x_n_u32(__a, __b, __p) __arm_vhsubq_x_n_u32(__a, __b, __p)
#define vhsubq_x_s8(__a, __b, __p) __arm_vhsubq_x_s8(__a, __b, __p)
#define vhsubq_x_s16(__a, __b, __p) __arm_vhsubq_x_s16(__a, __b, __p)
#define vhsubq_x_s32(__a, __b, __p) __arm_vhsubq_x_s32(__a, __b, __p)
#define vhsubq_x_u8(__a, __b, __p) __arm_vhsubq_x_u8(__a, __b, __p)
#define vhsubq_x_u16(__a, __b, __p) __arm_vhsubq_x_u16(__a, __b, __p)
#define vhsubq_x_u32(__a, __b, __p) __arm_vhsubq_x_u32(__a, __b, __p)
#define vrhaddq_x_s8(__a, __b, __p) __arm_vrhaddq_x_s8(__a, __b, __p)
#define vrhaddq_x_s16(__a, __b, __p) __arm_vrhaddq_x_s16(__a, __b, __p)
#define vrhaddq_x_s32(__a, __b, __p) __arm_vrhaddq_x_s32(__a, __b, __p)
#define vrhaddq_x_u8(__a, __b, __p) __arm_vrhaddq_x_u8(__a, __b, __p)
#define vrhaddq_x_u16(__a, __b, __p) __arm_vrhaddq_x_u16(__a, __b, __p)
#define vrhaddq_x_u32(__a, __b, __p) __arm_vrhaddq_x_u32(__a, __b, __p)
#define vrmulhq_x_s8(__a, __b, __p) __arm_vrmulhq_x_s8(__a, __b, __p)
#define vrmulhq_x_s16(__a, __b, __p) __arm_vrmulhq_x_s16(__a, __b, __p)
#define vrmulhq_x_s32(__a, __b, __p) __arm_vrmulhq_x_s32(__a, __b, __p)
#define vrmulhq_x_u8(__a, __b, __p) __arm_vrmulhq_x_u8(__a, __b, __p)
#define vrmulhq_x_u16(__a, __b, __p) __arm_vrmulhq_x_u16(__a, __b, __p)
#define vrmulhq_x_u32(__a, __b, __p) __arm_vrmulhq_x_u32(__a, __b, __p)
#define vandq_x_s8(__a, __b, __p) __arm_vandq_x_s8(__a, __b, __p)
#define vandq_x_s16(__a, __b, __p) __arm_vandq_x_s16(__a, __b, __p)
#define vandq_x_s32(__a, __b, __p) __arm_vandq_x_s32(__a, __b, __p)
#define vandq_x_u8(__a, __b, __p) __arm_vandq_x_u8(__a, __b, __p)
#define vandq_x_u16(__a, __b, __p) __arm_vandq_x_u16(__a, __b, __p)
#define vandq_x_u32(__a, __b, __p) __arm_vandq_x_u32(__a, __b, __p)
#define vbicq_x_s8(__a, __b, __p) __arm_vbicq_x_s8(__a, __b, __p)
#define vbicq_x_s16(__a, __b, __p) __arm_vbicq_x_s16(__a, __b, __p)
#define vbicq_x_s32(__a, __b, __p) __arm_vbicq_x_s32(__a, __b, __p)
#define vbicq_x_u8(__a, __b, __p) __arm_vbicq_x_u8(__a, __b, __p)
#define vbicq_x_u16(__a, __b, __p) __arm_vbicq_x_u16(__a, __b, __p)
#define vbicq_x_u32(__a, __b, __p) __arm_vbicq_x_u32(__a, __b, __p)
#define vbrsrq_x_n_s8(__a, __b, __p) __arm_vbrsrq_x_n_s8(__a, __b, __p)
#define vbrsrq_x_n_s16(__a, __b, __p) __arm_vbrsrq_x_n_s16(__a, __b, __p)
#define vbrsrq_x_n_s32(__a, __b, __p) __arm_vbrsrq_x_n_s32(__a, __b, __p)
#define vbrsrq_x_n_u8(__a, __b, __p) __arm_vbrsrq_x_n_u8(__a, __b, __p)
#define vbrsrq_x_n_u16(__a, __b, __p) __arm_vbrsrq_x_n_u16(__a, __b, __p)
#define vbrsrq_x_n_u32(__a, __b, __p) __arm_vbrsrq_x_n_u32(__a, __b, __p)
#define veorq_x_s8(__a, __b, __p) __arm_veorq_x_s8(__a, __b, __p)
#define veorq_x_s16(__a, __b, __p) __arm_veorq_x_s16(__a, __b, __p)
#define veorq_x_s32(__a, __b, __p) __arm_veorq_x_s32(__a, __b, __p)
#define veorq_x_u8(__a, __b, __p) __arm_veorq_x_u8(__a, __b, __p)
#define veorq_x_u16(__a, __b, __p) __arm_veorq_x_u16(__a, __b, __p)
#define veorq_x_u32(__a, __b, __p) __arm_veorq_x_u32(__a, __b, __p)
#define vmovlbq_x_s8(__a, __p) __arm_vmovlbq_x_s8(__a, __p)
#define vmovlbq_x_s16(__a, __p) __arm_vmovlbq_x_s16(__a, __p)
#define vmovlbq_x_u8(__a, __p) __arm_vmovlbq_x_u8(__a, __p)
#define vmovlbq_x_u16(__a, __p) __arm_vmovlbq_x_u16(__a, __p)
#define vmovltq_x_s8(__a, __p) __arm_vmovltq_x_s8(__a, __p)
#define vmovltq_x_s16(__a, __p) __arm_vmovltq_x_s16(__a, __p)
#define vmovltq_x_u8(__a, __p) __arm_vmovltq_x_u8(__a, __p)
#define vmovltq_x_u16(__a, __p) __arm_vmovltq_x_u16(__a, __p)
#define vmvnq_x_s8(__a, __p) __arm_vmvnq_x_s8(__a, __p)
#define vmvnq_x_s16(__a, __p) __arm_vmvnq_x_s16(__a, __p)
#define vmvnq_x_s32(__a, __p) __arm_vmvnq_x_s32(__a, __p)
#define vmvnq_x_u8(__a, __p) __arm_vmvnq_x_u8(__a, __p)
#define vmvnq_x_u16(__a, __p) __arm_vmvnq_x_u16(__a, __p)
#define vmvnq_x_u32(__a, __p) __arm_vmvnq_x_u32(__a, __p)
#define vmvnq_x_n_s16( __imm, __p) __arm_vmvnq_x_n_s16( __imm, __p)
#define vmvnq_x_n_s32( __imm, __p) __arm_vmvnq_x_n_s32( __imm, __p)
#define vmvnq_x_n_u16( __imm, __p) __arm_vmvnq_x_n_u16( __imm, __p)
#define vmvnq_x_n_u32( __imm, __p) __arm_vmvnq_x_n_u32( __imm, __p)
#define vornq_x_s8(__a, __b, __p) __arm_vornq_x_s8(__a, __b, __p)
#define vornq_x_s16(__a, __b, __p) __arm_vornq_x_s16(__a, __b, __p)
#define vornq_x_s32(__a, __b, __p) __arm_vornq_x_s32(__a, __b, __p)
#define vornq_x_u8(__a, __b, __p) __arm_vornq_x_u8(__a, __b, __p)
#define vornq_x_u16(__a, __b, __p) __arm_vornq_x_u16(__a, __b, __p)
#define vornq_x_u32(__a, __b, __p) __arm_vornq_x_u32(__a, __b, __p)
#define vorrq_x_s8(__a, __b, __p) __arm_vorrq_x_s8(__a, __b, __p)
#define vorrq_x_s16(__a, __b, __p) __arm_vorrq_x_s16(__a, __b, __p)
#define vorrq_x_s32(__a, __b, __p) __arm_vorrq_x_s32(__a, __b, __p)
#define vorrq_x_u8(__a, __b, __p) __arm_vorrq_x_u8(__a, __b, __p)
#define vorrq_x_u16(__a, __b, __p) __arm_vorrq_x_u16(__a, __b, __p)
#define vorrq_x_u32(__a, __b, __p) __arm_vorrq_x_u32(__a, __b, __p)
#define vrev16q_x_s8(__a, __p) __arm_vrev16q_x_s8(__a, __p)
#define vrev16q_x_u8(__a, __p) __arm_vrev16q_x_u8(__a, __p)
#define vrev32q_x_s8(__a, __p) __arm_vrev32q_x_s8(__a, __p)
#define vrev32q_x_s16(__a, __p) __arm_vrev32q_x_s16(__a, __p)
#define vrev32q_x_u8(__a, __p) __arm_vrev32q_x_u8(__a, __p)
#define vrev32q_x_u16(__a, __p) __arm_vrev32q_x_u16(__a, __p)
#define vrev64q_x_s8(__a, __p) __arm_vrev64q_x_s8(__a, __p)
#define vrev64q_x_s16(__a, __p) __arm_vrev64q_x_s16(__a, __p)
#define vrev64q_x_s32(__a, __p) __arm_vrev64q_x_s32(__a, __p)
#define vrev64q_x_u8(__a, __p) __arm_vrev64q_x_u8(__a, __p)
#define vrev64q_x_u16(__a, __p) __arm_vrev64q_x_u16(__a, __p)
#define vrev64q_x_u32(__a, __p) __arm_vrev64q_x_u32(__a, __p)
#define vrshlq_x_s8(__a, __b, __p) __arm_vrshlq_x_s8(__a, __b, __p)
#define vrshlq_x_s16(__a, __b, __p) __arm_vrshlq_x_s16(__a, __b, __p)
#define vrshlq_x_s32(__a, __b, __p) __arm_vrshlq_x_s32(__a, __b, __p)
#define vrshlq_x_u8(__a, __b, __p) __arm_vrshlq_x_u8(__a, __b, __p)
#define vrshlq_x_u16(__a, __b, __p) __arm_vrshlq_x_u16(__a, __b, __p)
#define vrshlq_x_u32(__a, __b, __p) __arm_vrshlq_x_u32(__a, __b, __p)
#define vshllbq_x_n_s8(__a, __imm, __p) __arm_vshllbq_x_n_s8(__a, __imm, __p)
#define vshllbq_x_n_s16(__a, __imm, __p) __arm_vshllbq_x_n_s16(__a, __imm, __p)
#define vshllbq_x_n_u8(__a, __imm, __p) __arm_vshllbq_x_n_u8(__a, __imm, __p)
#define vshllbq_x_n_u16(__a, __imm, __p) __arm_vshllbq_x_n_u16(__a, __imm, __p)
#define vshlltq_x_n_s8(__a, __imm, __p) __arm_vshlltq_x_n_s8(__a, __imm, __p)
#define vshlltq_x_n_s16(__a, __imm, __p) __arm_vshlltq_x_n_s16(__a, __imm, __p)
#define vshlltq_x_n_u8(__a, __imm, __p) __arm_vshlltq_x_n_u8(__a, __imm, __p)
#define vshlltq_x_n_u16(__a, __imm, __p) __arm_vshlltq_x_n_u16(__a, __imm, __p)
#define vshlq_x_s8(__a, __b, __p) __arm_vshlq_x_s8(__a, __b, __p)
#define vshlq_x_s16(__a, __b, __p) __arm_vshlq_x_s16(__a, __b, __p)
#define vshlq_x_s32(__a, __b, __p) __arm_vshlq_x_s32(__a, __b, __p)
#define vshlq_x_u8(__a, __b, __p) __arm_vshlq_x_u8(__a, __b, __p)
#define vshlq_x_u16(__a, __b, __p) __arm_vshlq_x_u16(__a, __b, __p)
#define vshlq_x_u32(__a, __b, __p) __arm_vshlq_x_u32(__a, __b, __p)
#define vshlq_x_n_s8(__a, __imm, __p) __arm_vshlq_x_n_s8(__a, __imm, __p)
#define vshlq_x_n_s16(__a, __imm, __p) __arm_vshlq_x_n_s16(__a, __imm, __p)
#define vshlq_x_n_s32(__a, __imm, __p) __arm_vshlq_x_n_s32(__a, __imm, __p)
#define vshlq_x_n_u8(__a, __imm, __p) __arm_vshlq_x_n_u8(__a, __imm, __p)
#define vshlq_x_n_u16(__a, __imm, __p) __arm_vshlq_x_n_u16(__a, __imm, __p)
#define vshlq_x_n_u32(__a, __imm, __p) __arm_vshlq_x_n_u32(__a, __imm, __p)
#define vrshrq_x_n_s8(__a, __imm, __p) __arm_vrshrq_x_n_s8(__a, __imm, __p)
#define vrshrq_x_n_s16(__a, __imm, __p) __arm_vrshrq_x_n_s16(__a, __imm, __p)
#define vrshrq_x_n_s32(__a, __imm, __p) __arm_vrshrq_x_n_s32(__a, __imm, __p)
#define vrshrq_x_n_u8(__a, __imm, __p) __arm_vrshrq_x_n_u8(__a, __imm, __p)
#define vrshrq_x_n_u16(__a, __imm, __p) __arm_vrshrq_x_n_u16(__a, __imm, __p)
#define vrshrq_x_n_u32(__a, __imm, __p) __arm_vrshrq_x_n_u32(__a, __imm, __p)
#define vshrq_x_n_s8(__a, __imm, __p) __arm_vshrq_x_n_s8(__a, __imm, __p)
#define vshrq_x_n_s16(__a, __imm, __p) __arm_vshrq_x_n_s16(__a, __imm, __p)
#define vshrq_x_n_s32(__a, __imm, __p) __arm_vshrq_x_n_s32(__a, __imm, __p)
#define vshrq_x_n_u8(__a, __imm, __p) __arm_vshrq_x_n_u8(__a, __imm, __p)
#define vshrq_x_n_u16(__a, __imm, __p) __arm_vshrq_x_n_u16(__a, __imm, __p)
#define vshrq_x_n_u32(__a, __imm, __p) __arm_vshrq_x_n_u32(__a, __imm, __p)
/* Predicated-with-undefined-inactive-lanes (_x_) floating-point operations,
   including complex add/multiply rotations and float->int conversions.  */
#define vdupq_x_n_f16(__a, __p) __arm_vdupq_x_n_f16(__a, __p)
#define vdupq_x_n_f32(__a, __p) __arm_vdupq_x_n_f32(__a, __p)
#define vminnmq_x_f16(__a, __b, __p) __arm_vminnmq_x_f16(__a, __b, __p)
#define vminnmq_x_f32(__a, __b, __p) __arm_vminnmq_x_f32(__a, __b, __p)
#define vmaxnmq_x_f16(__a, __b, __p) __arm_vmaxnmq_x_f16(__a, __b, __p)
#define vmaxnmq_x_f32(__a, __b, __p) __arm_vmaxnmq_x_f32(__a, __b, __p)
#define vabdq_x_f16(__a, __b, __p) __arm_vabdq_x_f16(__a, __b, __p)
#define vabdq_x_f32(__a, __b, __p) __arm_vabdq_x_f32(__a, __b, __p)
#define vabsq_x_f16(__a, __p) __arm_vabsq_x_f16(__a, __p)
#define vabsq_x_f32(__a, __p) __arm_vabsq_x_f32(__a, __p)
#define vaddq_x_f16(__a, __b, __p) __arm_vaddq_x_f16(__a, __b, __p)
#define vaddq_x_f32(__a, __b, __p) __arm_vaddq_x_f32(__a, __b, __p)
#define vaddq_x_n_f16(__a, __b, __p) __arm_vaddq_x_n_f16(__a, __b, __p)
#define vaddq_x_n_f32(__a, __b, __p) __arm_vaddq_x_n_f32(__a, __b, __p)
#define vnegq_x_f16(__a, __p) __arm_vnegq_x_f16(__a, __p)
#define vnegq_x_f32(__a, __p) __arm_vnegq_x_f32(__a, __p)
#define vmulq_x_f16(__a, __b, __p) __arm_vmulq_x_f16(__a, __b, __p)
#define vmulq_x_f32(__a, __b, __p) __arm_vmulq_x_f32(__a, __b, __p)
#define vmulq_x_n_f16(__a, __b, __p) __arm_vmulq_x_n_f16(__a, __b, __p)
#define vmulq_x_n_f32(__a, __b, __p) __arm_vmulq_x_n_f32(__a, __b, __p)
#define vsubq_x_f16(__a, __b, __p) __arm_vsubq_x_f16(__a, __b, __p)
#define vsubq_x_f32(__a, __b, __p) __arm_vsubq_x_f32(__a, __b, __p)
#define vsubq_x_n_f16(__a, __b, __p) __arm_vsubq_x_n_f16(__a, __b, __p)
#define vsubq_x_n_f32(__a, __b, __p) __arm_vsubq_x_n_f32(__a, __b, __p)
#define vcaddq_rot90_x_f16(__a, __b, __p) __arm_vcaddq_rot90_x_f16(__a, __b, __p)
#define vcaddq_rot90_x_f32(__a, __b, __p) __arm_vcaddq_rot90_x_f32(__a, __b, __p)
#define vcaddq_rot270_x_f16(__a, __b, __p) __arm_vcaddq_rot270_x_f16(__a, __b, __p)
#define vcaddq_rot270_x_f32(__a, __b, __p) __arm_vcaddq_rot270_x_f32(__a, __b, __p)
#define vcmulq_x_f16(__a, __b, __p) __arm_vcmulq_x_f16(__a, __b, __p)
#define vcmulq_x_f32(__a, __b, __p) __arm_vcmulq_x_f32(__a, __b, __p)
#define vcmulq_rot90_x_f16(__a, __b, __p) __arm_vcmulq_rot90_x_f16(__a, __b, __p)
#define vcmulq_rot90_x_f32(__a, __b, __p) __arm_vcmulq_rot90_x_f32(__a, __b, __p)
#define vcmulq_rot180_x_f16(__a, __b, __p) __arm_vcmulq_rot180_x_f16(__a, __b, __p)
#define vcmulq_rot180_x_f32(__a, __b, __p) __arm_vcmulq_rot180_x_f32(__a, __b, __p)
#define vcmulq_rot270_x_f16(__a, __b, __p) __arm_vcmulq_rot270_x_f16(__a, __b, __p)
#define vcmulq_rot270_x_f32(__a, __b, __p) __arm_vcmulq_rot270_x_f32(__a, __b, __p)
#define vcvtaq_x_s16_f16(__a, __p) __arm_vcvtaq_x_s16_f16(__a, __p)
#define vcvtaq_x_s32_f32(__a, __p) __arm_vcvtaq_x_s32_f32(__a, __p)
#define vcvtaq_x_u16_f16(__a, __p) __arm_vcvtaq_x_u16_f16(__a, __p)
#define vcvtaq_x_u32_f32(__a, __p) __arm_vcvtaq_x_u32_f32(__a, __p)
#define vcvtnq_x_s16_f16(__a, __p) __arm_vcvtnq_x_s16_f16(__a, __p)
#define vcvtnq_x_s32_f32(__a, __p) __arm_vcvtnq_x_s32_f32(__a, __p)
#define vcvtnq_x_u16_f16(__a, __p) __arm_vcvtnq_x_u16_f16(__a, __p)
#define vcvtnq_x_u32_f32(__a, __p) __arm_vcvtnq_x_u32_f32(__a, __p)
#define vcvtpq_x_s16_f16(__a, __p) __arm_vcvtpq_x_s16_f16(__a, __p)
#define vcvtpq_x_s32_f32(__a, __p) __arm_vcvtpq_x_s32_f32(__a, __p)
#define vcvtpq_x_u16_f16(__a, __p) __arm_vcvtpq_x_u16_f16(__a, __p)
#define vcvtpq_x_u32_f32(__a, __p) __arm_vcvtpq_x_u32_f32(__a, __p)
#define vcvtmq_x_s16_f16(__a, __p) __arm_vcvtmq_x_s16_f16(__a, __p)
#define vcvtmq_x_s32_f32(__a, __p) __arm_vcvtmq_x_s32_f32(__a, __p)
2859 #define vcvtmq_x_u16_f16(__a, __p) __arm_vcvtmq_x_u16_f16(__a, __p)
2860 #define vcvtmq_x_u32_f32(__a, __p) __arm_vcvtmq_x_u32_f32(__a, __p)
2861 #define vcvtbq_x_f32_f16(__a, __p) __arm_vcvtbq_x_f32_f16(__a, __p)
2862 #define vcvttq_x_f32_f16(__a, __p) __arm_vcvttq_x_f32_f16(__a, __p)
2863 #define vcvtq_x_f16_u16(__a, __p) __arm_vcvtq_x_f16_u16(__a, __p)
2864 #define vcvtq_x_f16_s16(__a, __p) __arm_vcvtq_x_f16_s16(__a, __p)
2865 #define vcvtq_x_f32_s32(__a, __p) __arm_vcvtq_x_f32_s32(__a, __p)
2866 #define vcvtq_x_f32_u32(__a, __p) __arm_vcvtq_x_f32_u32(__a, __p)
2867 #define vcvtq_x_n_f16_s16(__a, __imm6, __p) __arm_vcvtq_x_n_f16_s16(__a, __imm6, __p)
2868 #define vcvtq_x_n_f16_u16(__a, __imm6, __p) __arm_vcvtq_x_n_f16_u16(__a, __imm6, __p)
2869 #define vcvtq_x_n_f32_s32(__a, __imm6, __p) __arm_vcvtq_x_n_f32_s32(__a, __imm6, __p)
2870 #define vcvtq_x_n_f32_u32(__a, __imm6, __p) __arm_vcvtq_x_n_f32_u32(__a, __imm6, __p)
2871 #define vcvtq_x_s16_f16(__a, __p) __arm_vcvtq_x_s16_f16(__a, __p)
2872 #define vcvtq_x_s32_f32(__a, __p) __arm_vcvtq_x_s32_f32(__a, __p)
2873 #define vcvtq_x_u16_f16(__a, __p) __arm_vcvtq_x_u16_f16(__a, __p)
2874 #define vcvtq_x_u32_f32(__a, __p) __arm_vcvtq_x_u32_f32(__a, __p)
2875 #define vcvtq_x_n_s16_f16(__a, __imm6, __p) __arm_vcvtq_x_n_s16_f16(__a, __imm6, __p)
2876 #define vcvtq_x_n_s32_f32(__a, __imm6, __p) __arm_vcvtq_x_n_s32_f32(__a, __imm6, __p)
2877 #define vcvtq_x_n_u16_f16(__a, __imm6, __p) __arm_vcvtq_x_n_u16_f16(__a, __imm6, __p)
2878 #define vcvtq_x_n_u32_f32(__a, __imm6, __p) __arm_vcvtq_x_n_u32_f32(__a, __imm6, __p)
2879 #define vrndq_x_f16(__a, __p) __arm_vrndq_x_f16(__a, __p)
2880 #define vrndq_x_f32(__a, __p) __arm_vrndq_x_f32(__a, __p)
2881 #define vrndnq_x_f16(__a, __p) __arm_vrndnq_x_f16(__a, __p)
2882 #define vrndnq_x_f32(__a, __p) __arm_vrndnq_x_f32(__a, __p)
2883 #define vrndmq_x_f16(__a, __p) __arm_vrndmq_x_f16(__a, __p)
2884 #define vrndmq_x_f32(__a, __p) __arm_vrndmq_x_f32(__a, __p)
2885 #define vrndpq_x_f16(__a, __p) __arm_vrndpq_x_f16(__a, __p)
2886 #define vrndpq_x_f32(__a, __p) __arm_vrndpq_x_f32(__a, __p)
2887 #define vrndaq_x_f16(__a, __p) __arm_vrndaq_x_f16(__a, __p)
2888 #define vrndaq_x_f32(__a, __p) __arm_vrndaq_x_f32(__a, __p)
2889 #define vrndxq_x_f16(__a, __p) __arm_vrndxq_x_f16(__a, __p)
2890 #define vrndxq_x_f32(__a, __p) __arm_vrndxq_x_f32(__a, __p)
2891 #define vandq_x_f16(__a, __b, __p) __arm_vandq_x_f16(__a, __b, __p)
2892 #define vandq_x_f32(__a, __b, __p) __arm_vandq_x_f32(__a, __b, __p)
2893 #define vbicq_x_f16(__a, __b, __p) __arm_vbicq_x_f16(__a, __b, __p)
2894 #define vbicq_x_f32(__a, __b, __p) __arm_vbicq_x_f32(__a, __b, __p)
2895 #define vbrsrq_x_n_f16(__a, __b, __p) __arm_vbrsrq_x_n_f16(__a, __b, __p)
2896 #define vbrsrq_x_n_f32(__a, __b, __p) __arm_vbrsrq_x_n_f32(__a, __b, __p)
2897 #define veorq_x_f16(__a, __b, __p) __arm_veorq_x_f16(__a, __b, __p)
2898 #define veorq_x_f32(__a, __b, __p) __arm_veorq_x_f32(__a, __b, __p)
2899 #define vornq_x_f16(__a, __b, __p) __arm_vornq_x_f16(__a, __b, __p)
2900 #define vornq_x_f32(__a, __b, __p) __arm_vornq_x_f32(__a, __b, __p)
2901 #define vorrq_x_f16(__a, __b, __p) __arm_vorrq_x_f16(__a, __b, __p)
2902 #define vorrq_x_f32(__a, __b, __p) __arm_vorrq_x_f32(__a, __b, __p)
2903 #define vrev32q_x_f16(__a, __p) __arm_vrev32q_x_f16(__a, __p)
2904 #define vrev64q_x_f16(__a, __p) __arm_vrev64q_x_f16(__a, __p)
2905 #define vrev64q_x_f32(__a, __p) __arm_vrev64q_x_f32(__a, __p)
2906 #define vadciq_s32(__a, __b, __carry_out) __arm_vadciq_s32(__a, __b, __carry_out)
2907 #define vadciq_u32(__a, __b, __carry_out) __arm_vadciq_u32(__a, __b, __carry_out)
2908 #define vadciq_m_s32(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m_s32(__inactive, __a, __b, __carry_out, __p)
2909 #define vadciq_m_u32(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m_u32(__inactive, __a, __b, __carry_out, __p)
2910 #define vadcq_s32(__a, __b, __carry) __arm_vadcq_s32(__a, __b, __carry)
2911 #define vadcq_u32(__a, __b, __carry) __arm_vadcq_u32(__a, __b, __carry)
2912 #define vadcq_m_s32(__inactive, __a, __b, __carry, __p) __arm_vadcq_m_s32(__inactive, __a, __b, __carry, __p)
2913 #define vadcq_m_u32(__inactive, __a, __b, __carry, __p) __arm_vadcq_m_u32(__inactive, __a, __b, __carry, __p)
2914 #define vsbciq_s32(__a, __b, __carry_out) __arm_vsbciq_s32(__a, __b, __carry_out)
2915 #define vsbciq_u32(__a, __b, __carry_out) __arm_vsbciq_u32(__a, __b, __carry_out)
2916 #define vsbciq_m_s32(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m_s32(__inactive, __a, __b, __carry_out, __p)
2917 #define vsbciq_m_u32(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m_u32(__inactive, __a, __b, __carry_out, __p)
2918 #define vsbcq_s32(__a, __b, __carry) __arm_vsbcq_s32(__a, __b, __carry)
2919 #define vsbcq_u32(__a, __b, __carry) __arm_vsbcq_u32(__a, __b, __carry)
2920 #define vsbcq_m_s32(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m_s32(__inactive, __a, __b, __carry, __p)
2921 #define vsbcq_m_u32(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m_u32(__inactive, __a, __b, __carry, __p)
2922 #define vst1q_p_u8(__addr, __value, __p) __arm_vst1q_p_u8(__addr, __value, __p)
2923 #define vst1q_p_s8(__addr, __value, __p) __arm_vst1q_p_s8(__addr, __value, __p)
2924 #define vst2q_s8(__addr, __value) __arm_vst2q_s8(__addr, __value)
2925 #define vst2q_u8(__addr, __value) __arm_vst2q_u8(__addr, __value)
2926 #define vld1q_z_u8(__base, __p) __arm_vld1q_z_u8(__base, __p)
2927 #define vld1q_z_s8(__base, __p) __arm_vld1q_z_s8(__base, __p)
2928 #define vld2q_s8(__addr) __arm_vld2q_s8(__addr)
2929 #define vld2q_u8(__addr) __arm_vld2q_u8(__addr)
2930 #define vld4q_s8(__addr) __arm_vld4q_s8(__addr)
2931 #define vld4q_u8(__addr) __arm_vld4q_u8(__addr)
2932 #define vst1q_p_u16(__addr, __value, __p) __arm_vst1q_p_u16(__addr, __value, __p)
2933 #define vst1q_p_s16(__addr, __value, __p) __arm_vst1q_p_s16(__addr, __value, __p)
2934 #define vst2q_s16(__addr, __value) __arm_vst2q_s16(__addr, __value)
2935 #define vst2q_u16(__addr, __value) __arm_vst2q_u16(__addr, __value)
2936 #define vld1q_z_u16(__base, __p) __arm_vld1q_z_u16(__base, __p)
2937 #define vld1q_z_s16(__base, __p) __arm_vld1q_z_s16(__base, __p)
2938 #define vld2q_s16(__addr) __arm_vld2q_s16(__addr)
2939 #define vld2q_u16(__addr) __arm_vld2q_u16(__addr)
2940 #define vld4q_s16(__addr) __arm_vld4q_s16(__addr)
2941 #define vld4q_u16(__addr) __arm_vld4q_u16(__addr)
2942 #define vst1q_p_u32(__addr, __value, __p) __arm_vst1q_p_u32(__addr, __value, __p)
2943 #define vst1q_p_s32(__addr, __value, __p) __arm_vst1q_p_s32(__addr, __value, __p)
2944 #define vst2q_s32(__addr, __value) __arm_vst2q_s32(__addr, __value)
2945 #define vst2q_u32(__addr, __value) __arm_vst2q_u32(__addr, __value)
2946 #define vld1q_z_u32(__base, __p) __arm_vld1q_z_u32(__base, __p)
2947 #define vld1q_z_s32(__base, __p) __arm_vld1q_z_s32(__base, __p)
2948 #define vld2q_s32(__addr) __arm_vld2q_s32(__addr)
2949 #define vld2q_u32(__addr) __arm_vld2q_u32(__addr)
2950 #define vld4q_s32(__addr) __arm_vld4q_s32(__addr)
2951 #define vld4q_u32(__addr) __arm_vld4q_u32(__addr)
2952 #define vld4q_f16(__addr) __arm_vld4q_f16(__addr)
2953 #define vld2q_f16(__addr) __arm_vld2q_f16(__addr)
2954 #define vld1q_z_f16(__base, __p) __arm_vld1q_z_f16(__base, __p)
2955 #define vst2q_f16(__addr, __value) __arm_vst2q_f16(__addr, __value)
2956 #define vst1q_p_f16(__addr, __value, __p) __arm_vst1q_p_f16(__addr, __value, __p)
2957 #define vld4q_f32(__addr) __arm_vld4q_f32(__addr)
2958 #define vld2q_f32(__addr) __arm_vld2q_f32(__addr)
2959 #define vld1q_z_f32(__base, __p) __arm_vld1q_z_f32(__base, __p)
2960 #define vst2q_f32(__addr, __value) __arm_vst2q_f32(__addr, __value)
2961 #define vst1q_p_f32(__addr, __value, __p) __arm_vst1q_p_f32(__addr, __value, __p)
2962 #define vsetq_lane_f16(__a, __b, __idx) __arm_vsetq_lane_f16(__a, __b, __idx)
2963 #define vsetq_lane_f32(__a, __b, __idx) __arm_vsetq_lane_f32(__a, __b, __idx)
2964 #define vsetq_lane_s16(__a, __b, __idx) __arm_vsetq_lane_s16(__a, __b, __idx)
2965 #define vsetq_lane_s32(__a, __b, __idx) __arm_vsetq_lane_s32(__a, __b, __idx)
2966 #define vsetq_lane_s8(__a, __b, __idx) __arm_vsetq_lane_s8(__a, __b, __idx)
2967 #define vsetq_lane_s64(__a, __b, __idx) __arm_vsetq_lane_s64(__a, __b, __idx)
2968 #define vsetq_lane_u8(__a, __b, __idx) __arm_vsetq_lane_u8(__a, __b, __idx)
2969 #define vsetq_lane_u16(__a, __b, __idx) __arm_vsetq_lane_u16(__a, __b, __idx)
2970 #define vsetq_lane_u32(__a, __b, __idx) __arm_vsetq_lane_u32(__a, __b, __idx)
2971 #define vsetq_lane_u64(__a, __b, __idx) __arm_vsetq_lane_u64(__a, __b, __idx)
2972 #define vgetq_lane_f16(__a, __idx) __arm_vgetq_lane_f16(__a, __idx)
2973 #define vgetq_lane_f32(__a, __idx) __arm_vgetq_lane_f32(__a, __idx)
2974 #define vgetq_lane_s16(__a, __idx) __arm_vgetq_lane_s16(__a, __idx)
2975 #define vgetq_lane_s32(__a, __idx) __arm_vgetq_lane_s32(__a, __idx)
2976 #define vgetq_lane_s8(__a, __idx) __arm_vgetq_lane_s8(__a, __idx)
2977 #define vgetq_lane_s64(__a, __idx) __arm_vgetq_lane_s64(__a, __idx)
2978 #define vgetq_lane_u8(__a, __idx) __arm_vgetq_lane_u8(__a, __idx)
2979 #define vgetq_lane_u16(__a, __idx) __arm_vgetq_lane_u16(__a, __idx)
2980 #define vgetq_lane_u32(__a, __idx) __arm_vgetq_lane_u32(__a, __idx)
2981 #define vgetq_lane_u64(__a, __idx) __arm_vgetq_lane_u64(__a, __idx)
2982 #define sqrshr(__p0, __p1) __arm_sqrshr(__p0, __p1)
2983 #define sqrshrl(__p0, __p1) __arm_sqrshrl(__p0, __p1)
2984 #define sqrshrl_sat48(__p0, __p1) __arm_sqrshrl_sat48(__p0, __p1)
2985 #define sqshl(__p0, __p1) __arm_sqshl(__p0, __p1)
2986 #define sqshll(__p0, __p1) __arm_sqshll(__p0, __p1)
2987 #define srshr(__p0, __p1) __arm_srshr(__p0, __p1)
2988 #define srshrl(__p0, __p1) __arm_srshrl(__p0, __p1)
2989 #define uqrshl(__p0, __p1) __arm_uqrshl(__p0, __p1)
2990 #define uqrshll(__p0, __p1) __arm_uqrshll(__p0, __p1)
2991 #define uqrshll_sat48(__p0, __p1) __arm_uqrshll_sat48(__p0, __p1)
2992 #define uqshl(__p0, __p1) __arm_uqshl(__p0, __p1)
2993 #define uqshll(__p0, __p1) __arm_uqshll(__p0, __p1)
2994 #define urshr(__p0, __p1) __arm_urshr(__p0, __p1)
2995 #define urshrl(__p0, __p1) __arm_urshrl(__p0, __p1)
2996 #define lsll(__p0, __p1) __arm_lsll(__p0, __p1)
2997 #define asrl(__p0, __p1) __arm_asrl(__p0, __p1)
2998 #define vshlcq_m_s8(__a, __b, __imm, __p) __arm_vshlcq_m_s8(__a, __b, __imm, __p)
2999 #define vshlcq_m_u8(__a, __b, __imm, __p) __arm_vshlcq_m_u8(__a, __b, __imm, __p)
3000 #define vshlcq_m_s16(__a, __b, __imm, __p) __arm_vshlcq_m_s16(__a, __b, __imm, __p)
3001 #define vshlcq_m_u16(__a, __b, __imm, __p) __arm_vshlcq_m_u16(__a, __b, __imm, __p)
3002 #define vshlcq_m_s32(__a, __b, __imm, __p) __arm_vshlcq_m_s32(__a, __b, __imm, __p)
3003 #define vshlcq_m_u32(__a, __b, __imm, __p) __arm_vshlcq_m_u32(__a, __b, __imm, __p)
3004 #endif
3005
/* For big-endian, GCC's vector indices are reversed within each 64 bits
   compared to the architectural lane indices used by MVE intrinsics.  */
/* Number of lanes in vector __v, derived from its element size.  */
#define __ARM_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0]))
#ifdef __ARM_BIG_ENDIAN
/* Map an architectural lane index to GCC's lane numbering (XOR flips the
   index within each 64-bit half).  */
#define __ARM_LANEQ(__vec, __idx) (__idx ^ (__ARM_NUM_LANES(__vec)/2 - 1))
#else
#define __ARM_LANEQ(__vec, __idx) __idx
#endif
/* Compile-time range check of a lane index for vgetq/vsetq_lane.  */
#define __ARM_CHECK_LANEQ(__vec, __idx)		 \
  __builtin_arm_lane_check (__ARM_NUM_LANES(__vec),     \
			    __ARM_LANEQ(__vec, __idx))
3017
/* vst4q_<type>: store four vectors to __addr with 4-way interleave (MVE
   VST4x).  The x4 tuple is reinterpreted through a union into the opaque
   __builtin_neon_xi mode the builtin expects; this punning is the
   established idiom in this header for multi-vector builtins.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_s8 (int8_t * __addr, int8x16x4_t __value)
{
  union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv16qi ((__builtin_neon_qi *) __addr, __rv.__o);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_s16 (int16_t * __addr, int16x8x4_t __value)
{
  union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv8hi ((__builtin_neon_hi *) __addr, __rv.__o);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_s32 (int32_t * __addr, int32x4x4_t __value)
{
  union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv4si ((__builtin_neon_si *) __addr, __rv.__o);
}

/* Unsigned variants share the signed builtins; only the pointer cast and
   union member types differ.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_u8 (uint8_t * __addr, uint8x16x4_t __value)
{
  union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv16qi ((__builtin_neon_qi *) __addr, __rv.__o);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_u16 (uint16_t * __addr, uint16x8x4_t __value)
{
  union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv8hi ((__builtin_neon_hi *) __addr, __rv.__o);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_u32 (uint32_t * __addr, uint32x4x4_t __value)
{
  union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv4si ((__builtin_neon_si *) __addr, __rv.__o);
}
3071
/* vdupq_n_s<size>: broadcast scalar __a to every lane (MVE VDUP); direct
   1:1 mapping onto the vdupq_n builtin for each element mode.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_s8 (int8_t __a)
{
  return __builtin_mve_vdupq_n_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_s16 (int16_t __a)
{
  return __builtin_mve_vdupq_n_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_s32 (int32_t __a)
{
  return __builtin_mve_vdupq_n_sv4si (__a);
}
3092
/* vabsq_s<size>: lane-wise absolute value (MVE VABS), signed only; 1:1
   mapping onto the vabsq builtin.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_s8 (int8x16_t __a)
{
  return __builtin_mve_vabsq_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_s16 (int16x8_t __a)
{
  return __builtin_mve_vabsq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_s32 (int32x4_t __a)
{
  return __builtin_mve_vabsq_sv4si (__a);
}
3113
/* vclsq_s<size>: lane-wise count of leading sign bits (MVE VCLS), signed
   only; 1:1 mapping onto the vclsq builtin.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_s8 (int8x16_t __a)
{
  return __builtin_mve_vclsq_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_s16 (int16x8_t __a)
{
  return __builtin_mve_vclsq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_s32 (int32x4_t __a)
{
  return __builtin_mve_vclsq_sv4si (__a);
}
3134
/* vclzq_s<size>: lane-wise count of leading zero bits (MVE VCLZ), signed
   element types; unsigned variants appear further below.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_s8 (int8x16_t __a)
{
  return __builtin_mve_vclzq_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_s16 (int16x8_t __a)
{
  return __builtin_mve_vclzq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_s32 (int32x4_t __a)
{
  return __builtin_mve_vclzq_sv4si (__a);
}
3155
/* vnegq_s<size>: lane-wise negation (MVE VNEG), signed only; 1:1 mapping
   onto the vnegq builtin.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_s8 (int8x16_t __a)
{
  return __builtin_mve_vnegq_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_s16 (int16x8_t __a)
{
  return __builtin_mve_vnegq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_s32 (int32x4_t __a)
{
  return __builtin_mve_vnegq_sv4si (__a);
}
3176
/* vaddlvq_s32: across-vector sum of all lanes, widened to a 64-bit scalar
   result (MVE VADDLV).  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvq_s32 (int32x4_t __a)
{
  return __builtin_mve_vaddlvq_sv4si (__a);
}

/* vaddvq_s<size>: across-vector sum of all lanes into a 32-bit scalar
   (MVE VADDV); the 32-bit accumulator is shared by all element sizes.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_s8 (int8x16_t __a)
{
  return __builtin_mve_vaddvq_sv16qi (__a);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_s16 (int16x8_t __a)
{
  return __builtin_mve_vaddvq_sv8hi (__a);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_s32 (int32x4_t __a)
{
  return __builtin_mve_vaddvq_sv4si (__a);
}
3204
/* vmovlbq/vmovltq: widen the bottom (even-numbered) or top (odd-numbered)
   half of the lanes to the next wider element type (MVE VMOVLB/VMOVLT).
   Hence the result has half as many, double-width lanes.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_s8 (int8x16_t __a)
{
  return __builtin_mve_vmovlbq_sv16qi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_s16 (int16x8_t __a)
{
  return __builtin_mve_vmovlbq_sv8hi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_s8 (int8x16_t __a)
{
  return __builtin_mve_vmovltq_sv16qi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_s16 (int16x8_t __a)
{
  return __builtin_mve_vmovltq_sv8hi (__a);
}
3232
/* vmvnq_s<size>: lane-wise bitwise NOT (MVE VMVN).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_s8 (int8x16_t __a)
{
  return __builtin_mve_vmvnq_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_s16 (int16x8_t __a)
{
  return __builtin_mve_vmvnq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_s32 (int32x4_t __a)
{
  return __builtin_mve_vmvnq_sv4si (__a);
}

/* vmvnq_n_s<size>: vector of the bitwise complement of an immediate
   (VMVN immediate form); __imm must be a compile-time constant, which is
   why the parameter is const-qualified.  No 8-bit form exists.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_n_s16 (const int16_t __imm)
{
  return __builtin_mve_vmvnq_n_sv8hi (__imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_n_s32 (const int32_t __imm)
{
  return __builtin_mve_vmvnq_n_sv4si (__imm);
}
3267
/* vrev16q/vrev32q/vrev64q: reverse element order within each 16-, 32- or
   64-bit container (MVE VREV16/VREV32/VREV64).  Only element sizes
   strictly smaller than the container have variants here.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev16q_s8 (int8x16_t __a)
{
  return __builtin_mve_vrev16q_sv16qi (__a);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_s8 (int8x16_t __a)
{
  return __builtin_mve_vrev32q_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_s16 (int16x8_t __a)
{
  return __builtin_mve_vrev32q_sv8hi (__a);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_s8 (int8x16_t __a)
{
  return __builtin_mve_vrev64q_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_s16 (int16x8_t __a)
{
  return __builtin_mve_vrev64q_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_s32 (int32x4_t __a)
{
  return __builtin_mve_vrev64q_sv4si (__a);
}
3309
/* vqabsq/vqnegq: saturating lane-wise absolute value / negation (MVE
   VQABS/VQNEG), signed only; saturation handles INT_MIN, which plain
   vabsq/vnegq would overflow.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_s8 (int8x16_t __a)
{
  return __builtin_mve_vqabsq_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_s16 (int16x8_t __a)
{
  return __builtin_mve_vqabsq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_s32 (int32x4_t __a)
{
  return __builtin_mve_vqabsq_sv4si (__a);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_s8 (int8x16_t __a)
{
  return __builtin_mve_vqnegq_sv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_s16 (int16x8_t __a)
{
  return __builtin_mve_vqnegq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_s32 (int32x4_t __a)
{
  return __builtin_mve_vqnegq_sv4si (__a);
}
3351
/* vrev64q_u<size>: unsigned counterparts of vrev64q above, using the
   unsigned (_uv*) builtins.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_u8 (uint8x16_t __a)
{
  return __builtin_mve_vrev64q_uv16qi (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_u16 (uint16x8_t __a)
{
  return __builtin_mve_vrev64q_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_u32 (uint32x4_t __a)
{
  return __builtin_mve_vrev64q_uv4si (__a);
}
3372
/* vmvnq_u<size>: lane-wise bitwise NOT for unsigned vectors (MVE VMVN).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_u8 (uint8x16_t __a)
{
  return __builtin_mve_vmvnq_uv16qi (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_u16 (uint16x8_t __a)
{
  return __builtin_mve_vmvnq_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_u32 (uint32x4_t __a)
{
  return __builtin_mve_vmvnq_uv4si (__a);
}
3393
/* vdupq_n_u<size>: broadcast scalar __a to every lane, unsigned
   counterparts of vdupq_n_s above.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_u8 (uint8_t __a)
{
  return __builtin_mve_vdupq_n_uv16qi (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_u16 (uint16_t __a)
{
  return __builtin_mve_vdupq_n_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_u32 (uint32_t __a)
{
  return __builtin_mve_vdupq_n_uv4si (__a);
}
3414
/* vclzq_u<size>: lane-wise count of leading zero bits, unsigned
   counterparts of vclzq_s above.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_u8 (uint8x16_t __a)
{
  return __builtin_mve_vclzq_uv16qi (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_u16 (uint16x8_t __a)
{
  return __builtin_mve_vclzq_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_u32 (uint32x4_t __a)
{
  return __builtin_mve_vclzq_uv4si (__a);
}
3435
/* vaddvq_u<size>: across-vector sum of all lanes into a 32-bit unsigned
   scalar (MVE VADDV).  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_u8 (uint8x16_t __a)
{
  return __builtin_mve_vaddvq_uv16qi (__a);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_u16 (uint16x8_t __a)
{
  return __builtin_mve_vaddvq_uv8hi (__a);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_u32 (uint32x4_t __a)
{
  return __builtin_mve_vaddvq_uv4si (__a);
}
3456
/* vrev32q_u8/u16: reverse elements within each 32-bit container,
   unsigned counterparts of vrev32q_s above.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_u8 (uint8x16_t __a)
{
  return __builtin_mve_vrev32q_uv16qi (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_u16 (uint16x8_t __a)
{
  return __builtin_mve_vrev32q_uv8hi (__a);
}
3470
/* vmovltq/vmovlbq unsigned: zero-extend the top (odd) / bottom (even)
   half of the lanes to the next wider element type (MVE VMOVLT/VMOVLB).  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_u8 (uint8x16_t __a)
{
  return __builtin_mve_vmovltq_uv16qi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_u16 (uint16x8_t __a)
{
  return __builtin_mve_vmovltq_uv8hi (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_u8 (uint8x16_t __a)
{
  return __builtin_mve_vmovlbq_uv16qi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_u16 (uint16x8_t __a)
{
  return __builtin_mve_vmovlbq_uv8hi (__a);
}
3498
/* vmvnq_n_u<size>: vector of the bitwise complement of an immediate
   (VMVN immediate form).  NOTE(review): the parameter is plain `const int'
   here, unlike the signed variants' `const int16_t'/`const int32_t' —
   presumably deliberate for immediate-range handling; confirm against the
   ACLE before changing.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_n_u16 (const int __imm)
{
  return __builtin_mve_vmvnq_n_uv8hi (__imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_n_u32 (const int __imm)
{
  return __builtin_mve_vmvnq_n_uv4si (__imm);
}
3512
/* vrev16q_u8: reverse bytes within each 16-bit container (MVE VREV16).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev16q_u8 (uint8x16_t __a)
{
  return __builtin_mve_vrev16q_uv16qi (__a);
}

/* vaddlvq_u32: across-vector sum widened to a 64-bit unsigned scalar
   (MVE VADDLV).  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvq_u32 (uint32x4_t __a)
{
  return __builtin_mve_vaddlvq_uv4si (__a);
}
3526
3527 __extension__ extern __inline int64_t
3528 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3529 __arm_vctp16q (uint32_t __a)
3530 {
3531 return __builtin_mve_vctp16qhi (__a);
3532 }
3533
/* vctp32q/vctp64q/vctp8q: create a tail predicate with the low __a lanes
   of the given element size active (MVE VCTP); result is the 16-bit
   predicate type used by the _m/_p/_z intrinsics.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp32q (uint32_t __a)
{
  return __builtin_mve_vctp32qhi (__a);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp64q (uint32_t __a)
{
  return __builtin_mve_vctp64qhi (__a);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp8q (uint32_t __a)
{
  return __builtin_mve_vctp8qhi (__a);
}

/* vpnot: bitwise complement of a predicate (MVE VPNOT).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpnot (mve_pred16_t __a)
{
  return __builtin_mve_vpnothi (__a);
}
3561
/* vcreateq_<type>: build a 128-bit vector from two 64-bit scalars
   (__a = low half, __b = high half per the builtin's argument order —
   NOTE(review): confirm halves against the vcreateq builtin expander).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_u8 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_uv16qi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_u16 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_uv8hi (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_u32 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_uv4si (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_u64 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_uv2di (__a, __b);
}

/* Signed variants take uint64_t halves as well; only the result type's
   lane interpretation differs.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_s8 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_sv16qi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_s16 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_sv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_s32 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_sv4si (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_s64 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_sv2di (__a, __b);
}
3617
/* VSHRQ_N family: shift each lane right by the compile-time immediate __imm
   (arithmetic shift for the _s variants, logical for the _u variants, per
   the builtin's signed/unsigned suffix).  __imm must be a constant; range
   checking is performed by the builtin expansion.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_n_s8 (int8x16_t __a, const int __imm)
{
  return __builtin_mve_vshrq_n_sv16qi (__a, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vshrq_n_sv8hi (__a, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_n_s32 (int32x4_t __a, const int __imm)
{
  return __builtin_mve_vshrq_n_sv4si (__a, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_n_u8 (uint8x16_t __a, const int __imm)
{
  return __builtin_mve_vshrq_n_uv16qi (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_n_u16 (uint16x8_t __a, const int __imm)
{
  return __builtin_mve_vshrq_n_uv8hi (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_n_u32 (uint32x4_t __a, const int __imm)
{
  return __builtin_mve_vshrq_n_uv4si (__a, __imm);
}
/* VADDLVQ_P: predicated widening add-across-vector -- sum the active
   (predicate __p) 32-bit lanes of __a into a 64-bit scalar result.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvq_p_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddlvq_p_sv4si (__a, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvq_p_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddlvq_p_uv4si (__a, __p);
}
3672
/* VCMPNEQ (vector, vector): lane-wise "not equal" compare; the result is a
   16-bit MVE predicate (one predicate bit per byte of the vector), as for
   all MVE vector compares.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcmpneq_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpneq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpneq_sv4si (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vcmpneq_uv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vcmpneq_uv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vcmpneq_uv4si (__a, __b);
}
3714
/* VSHLQ (vector, vector): shift each lane of __a by the signed per-lane
   shift amount in __b.  Note the shift-count vector is always signed, even
   for the unsigned-data variants (negative counts shift right, per the
   ACLE vshlq semantics).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vshlq_sv16qi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vshlq_sv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vshlq_sv4si (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vshlq_uv16qi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vshlq_uv8hi (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vshlq_uv4si (__a, __b);
}
/* uint8x16_t arithmetic and logical operations.  Naming follows the ACLE
   MVE convention: _n_ variants take a scalar second operand (broadcast to
   all lanes); the 'q' prefix inside the op name (vqaddq/vqsubq) denotes
   saturation, 'r' (vrmulhq/vrhaddq) denotes rounding, 'h' (vhaddq/vhsubq)
   denotes halving.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vsubq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vsubq_n_uv16qi (__a, __b);
}

/* Rounding multiply, returning the high half of each product.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vrmulhq_uv16qi (__a, __b);
}

/* Rounding halving add.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vrhaddq_uv16qi (__a, __b);
}

/* Saturating subtract.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vqsubq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vqsubq_n_uv16qi (__a, __b);
}

/* Saturating add.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vqaddq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vqaddq_n_uv16qi (__a, __b);
}

/* Bitwise OR.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vorrq_uv16qi (__a, __b);
}

/* Bitwise OR with complement of second operand (ORN).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vornq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmulq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vmulq_n_uv16qi (__a, __b);
}

/* Widening multiply of the odd (top) lanes: 8-bit inputs, 16-bit result.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmulltq_int_uv16qi (__a, __b);
}

/* Widening multiply of the even (bottom) lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmullbq_int_uv16qi (__a, __b);
}

/* Multiply, returning the high half of each product.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmulhq_uv16qi (__a, __b);
}

/* Multiply-accumulate across vector: scalar sum of lane products.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmladavq_uv16qi (__a, __b);
}

/* Minimum across vector, combined with the scalar __a.  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_u8 (uint8_t __a, uint8x16_t __b)
{
  return __builtin_mve_vminvq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vminq_uv16qi (__a, __b);
}

/* Maximum across vector, combined with the scalar __a.  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_u8 (uint8_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmaxvq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmaxq_uv16qi (__a, __b);
}

/* Halving subtract.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vhsubq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vhsubq_n_uv16qi (__a, __b);
}

/* Halving add.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vhaddq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vhaddq_n_uv16qi (__a, __b);
}

/* Bitwise exclusive OR.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_veorq_uv16qi (__a, __b);
}
3930
/* uint8x16_t compares, producing an MVE predicate.  HI = unsigned higher
   (>), CS = unsigned higher-or-same (>=); _n_ variants compare every lane
   against the scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vcmpneq_n_uv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vcmphiq_uv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vcmphiq_n_uv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vcmpeqq_uv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vcmpeqq_n_uv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vcmpcsq_uv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vcmpcsq_n_uv16qi (__a, __b);
}
3979
/* Complex add with rotation.  There is only one (signed-typed) vcaddq
   builtin per vector mode, so the unsigned wrappers cast their operands to
   the signed type and cast the result back; this is purely a type change,
   not a value change.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)
    __builtin_mve_vcaddq_rot90v16qi ((int8x16_t)__a, (int8x16_t)__b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)
    __builtin_mve_vcaddq_rot270v16qi ((int8x16_t)__a, (int8x16_t)__b);
}

/* Bitwise AND with complement of second operand (BIC).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vbicq_uv16qi (__a, __b);
}

/* Bitwise AND.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vandq_uv16qi (__a, __b);
}

/* Predicated add across vector: sum of the active lanes of __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_uv16qi (__a, __p);
}

/* Add across vector, accumulating into the scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_u8 (uint32_t __a, uint8x16_t __b)
{
  return __builtin_mve_vaddvaq_uv16qi (__a, __b);
}

/* Add a broadcast scalar to every lane.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_u8 (uint8x16_t __a, uint8_t __b)
{
  return __builtin_mve_vaddq_n_uv16qi (__a, __b);
}

/* Absolute difference of each lane pair.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vabdq_uv16qi (__a, __b);
}
4037
/* uint8x16_t shifts and signed-input absolute min/max reductions.  _r
   variants take a single run-time scalar shift count applied to all lanes;
   _n variants marked 'const int __imm' require a compile-time immediate.
   'q' = saturating, 'r' prefix on the op (vrshlq/vrshrq) = rounding.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_u8 (uint8x16_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vrshlq_uv16qi (__a, __b);
}

/* NOTE: despite the _n_ name this variant takes a run-time scalar count
   (matching the builtin's int32_t operand), not an immediate.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_u8 (uint8x16_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqshlq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_u8 (uint8x16_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqrshlq_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_u8 (uint8x16_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_uv16qi (__a, __b);
}

/* Minimum of absolute values across the signed vector, combined with the
   unsigned scalar __a.  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_s8 (uint8_t __a, int8x16_t __b)
{
  return __builtin_mve_vminavq_sv16qi (__a, __b);
}

/* Lane-wise minimum of __a and |__b|.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_s8 (uint8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vminaq_sv16qi (__a, __b);
}

/* Maximum of absolute values across the signed vector, combined with the
   unsigned scalar __a.  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_s8 (uint8_t __a, int8x16_t __b)
{
  return __builtin_mve_vmaxavq_sv16qi (__a, __b);
}

/* Lane-wise maximum of __a and |__b|.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_s8 (uint8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmaxaq_sv16qi (__a, __b);
}

/* Bit-reverse each lane and shift right (VBRSR).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_u8 (uint8x16_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_uv16qi (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n_u8 (uint8x16_t __a, const int __imm)
{
  return __builtin_mve_vshlq_n_uv16qi (__a, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_n_u8 (uint8x16_t __a, const int __imm)
{
  return __builtin_mve_vrshrq_n_uv16qi (__a, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n_u8 (uint8x16_t __a, const int __imm)
{
  return __builtin_mve_vqshlq_n_uv16qi (__a, __imm);
}
4142
/* int8x16_t compares (LT/LE/GT/GE/EQ/NE, signed), producing an MVE
   predicate; _n_ variants compare against a broadcast scalar.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vcmpneq_n_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcmpltq_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vcmpltq_n_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcmpleq_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vcmpleq_n_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcmpgtq_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vcmpgtq_n_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcmpgeq_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vcmpgeq_n_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcmpeqq_sv16qi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vcmpeqq_n_sv16qi (__a, __b);
}

/* Saturating shift left of signed input to an UNSIGNED result (VQSHLU);
   __imm must be a compile-time constant.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq_n_s8 (int8x16_t __a, const int __imm)
{
  return __builtin_mve_vqshluq_n_sv16qi (__a, __imm);
}

/* Predicated add across vector: sum of the active lanes of __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_sv16qi (__a, __p);
}
4233
/* int8x16_t subtract and shift operations.  Same naming scheme as the
   unsigned group above: _n_ = scalar operand, _r = run-time scalar shift
   count, 'q' = saturating, 'r' op-prefix = rounding.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vsubq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vsubq_n_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_s8 (int8x16_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vrshlq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_s8 (int8x16_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_sv16qi (__a, __b);
}

/* Rounding multiply, returning the high half of each product.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vrmulhq_sv16qi (__a, __b);
}

/* Rounding halving add.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vrhaddq_sv16qi (__a, __b);
}

/* Saturating subtract.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqsubq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vqsubq_n_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqshlq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_s8 (int8x16_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqrshlq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_s8 (int8x16_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_sv16qi (__a, __b);
}
4324
/* int8x16_t saturating doubling multiplies, saturating add, bitwise ops,
   multiplies and dot-product reductions.  vqdmulh = saturating doubling
   multiply returning high half; vqrdmulh adds rounding.  vml[as]dav[x]q
   reduce lane products into a 32-bit scalar ('s' = subtract, 'x' =
   exchanged/cross lane pairing).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqrdmulhq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vqrdmulhq_n_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqdmulhq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vqdmulhq_n_sv16qi (__a, __b);
}

/* Saturating add.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqaddq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vqaddq_n_sv16qi (__a, __b);
}

/* Bitwise OR.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vorrq_sv16qi (__a, __b);
}

/* Bitwise OR with complement of second operand (ORN).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vornq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmulq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vmulq_n_sv16qi (__a, __b);
}

/* Widening multiply of the odd (top) lanes: 8-bit inputs, 16-bit result.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmulltq_int_sv16qi (__a, __b);
}

/* Widening multiply of the even (bottom) lanes.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmullbq_int_sv16qi (__a, __b);
}

/* Multiply, returning the high half of each product.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmulhq_sv16qi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmlsdavxq_sv16qi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmlsdavq_sv16qi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmladavxq_sv16qi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmladavq_sv16qi (__a, __b);
}
4443
/* int8x16_t min/max, halving add/subtract, halving complex add, exclusive
   OR, complex add, bit-reverse-shift, and BIC/AND.  Note the signed vcaddq
   wrappers call the same rotation builtins as the unsigned ones, only
   without casts since the builtin is already signed-typed.  */
__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_s8 (int8_t __a, int8x16_t __b)
{
  return __builtin_mve_vminvq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vminq_sv16qi (__a, __b);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_s8 (int8_t __a, int8x16_t __b)
{
  return __builtin_mve_vmaxvq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vmaxq_sv16qi (__a, __b);
}

/* Halving subtract.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vhsubq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vhsubq_n_sv16qi (__a, __b);
}

/* Halving complex add with rotation.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vhcaddq_rot90_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vhcaddq_rot270_sv16qi (__a, __b);
}

/* Halving add.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vhaddq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_s8 (int8x16_t __a, int8_t __b)
{
  return __builtin_mve_vhaddq_n_sv16qi (__a, __b);
}

/* Bitwise exclusive OR.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_veorq_sv16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcaddq_rot90v16qi (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vcaddq_rot270v16qi (__a, __b);
}

/* Bit-reverse each lane and shift right (VBRSR).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_s8 (int8x16_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_sv16qi (__a, __b);
}

/* Bitwise AND with complement of second operand (BIC).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vbicq_sv16qi (__a, __b);
}

/* Bitwise AND.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vandq_sv16qi (__a, __b);
}
4555
4556 __extension__ extern __inline int32_t
4557 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4558 __arm_vaddvaq_s8 (int32_t __a, int8x16_t __b)
4559 {
4560 return __builtin_mve_vaddvaq_sv16qi (__a, __b);
4561 }
4562
4563 __extension__ extern __inline int8x16_t
4564 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4565 __arm_vaddq_n_s8 (int8x16_t __a, int8_t __b)
4566 {
4567 return __builtin_mve_vaddq_n_sv16qi (__a, __b);
4568 }
4569
4570 __extension__ extern __inline int8x16_t
4571 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4572 __arm_vabdq_s8 (int8x16_t __a, int8x16_t __b)
4573 {
4574 return __builtin_mve_vabdq_sv16qi (__a, __b);
4575 }
4576
4577 __extension__ extern __inline int8x16_t
4578 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4579 __arm_vshlq_n_s8 (int8x16_t __a, const int __imm)
4580 {
4581 return __builtin_mve_vshlq_n_sv16qi (__a, __imm);
4582 }
4583
4584 __extension__ extern __inline int8x16_t
4585 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4586 __arm_vrshrq_n_s8 (int8x16_t __a, const int __imm)
4587 {
4588 return __builtin_mve_vrshrq_n_sv16qi (__a, __imm);
4589 }
4590
4591 __extension__ extern __inline int8x16_t
4592 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4593 __arm_vqshlq_n_s8 (int8x16_t __a, const int __imm)
4594 {
4595 return __builtin_mve_vqshlq_n_sv16qi (__a, __imm);
4596 }
4597
/* Intrinsics operating on 16-bit unsigned vectors (uint16x8_t), plus the
   vmina/vmaxa/vminav/vmaxav family which mixes unsigned accumulators with
   signed vector inputs.  All are always-inline forwarders to GCC MVE
   builtins; the builtin suffix encodes the mode (v8hi = eight HImode
   lanes) and signedness (_u / _s).  */

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vsubq_uv8hi (__a, __b);
}

/* The _n_ variants take a scalar second operand instead of a vector.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vsubq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vrmulhq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vrhaddq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vqsubq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vqsubq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vqaddq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vqaddq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vorrq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vornq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmulq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vmulq_n_uv8hi (__a, __b);
}

/* Widening multiplies: 16-bit lane inputs produce a 32-bit lane result
   vector (uint32x4_t).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmulltq_int_uv8hi (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmullbq_int_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmulhq_uv8hi (__a, __b);
}

/* Reductions: vector in, scalar out.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmladavq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_u16 (uint16_t __a, uint16x8_t __b)
{
  return __builtin_mve_vminvq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vminq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_u16 (uint16_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmaxvq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmaxq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vhsubq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vhsubq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vhaddq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vhaddq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_veorq_uv8hi (__a, __b);
}

/* Comparison intrinsics return a predicate mask (mve_pred16_t).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vcmpneq_n_uv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vcmphiq_uv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vcmphiq_n_uv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vcmpeqq_uv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vcmpeqq_n_uv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vcmpcsq_uv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vcmpcsq_n_uv8hi (__a, __b);
}

/* The vcaddq rotate builtins are declared only on the signed vector mode,
   so the unsigned variants cast the operands to int16x8_t and cast the
   result back.  The bit pattern is unchanged by the casts.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)
    __builtin_mve_vcaddq_rot90v8hi ((int16x8_t)__a, (int16x8_t)__b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)
    __builtin_mve_vcaddq_rot270v8hi ((int16x8_t)__a, (int16x8_t)__b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vbicq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vandq_uv8hi (__a, __b);
}

/* The _p variants take a predicate mask selecting the active lanes.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_uv8hi (__a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_u16 (uint32_t __a, uint16x8_t __b)
{
  return __builtin_mve_vaddvaq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_mve_vaddq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vabdq_uv8hi (__a, __b);
}

/* Shift-by-register forms (_r) take a signed scalar shift amount; the
   vector-shift forms take a signed shift-count vector even for unsigned
   data vectors.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_u16 (uint16x8_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vrshlq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_u16 (uint16x8_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqshlq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_u16 (uint16x8_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrshlq_uv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_u16 (uint16x8_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_uv8hi (__a, __b);
}

/* vmina/vmaxa family: signed vector inputs with unsigned results, mapped
   to the signed (_s) builtins.  */
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_s16 (uint16_t __a, int16x8_t __b)
{
  return __builtin_mve_vminavq_sv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_s16 (uint16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vminaq_sv8hi (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_s16 (uint16_t __a, int16x8_t __b)
{
  return __builtin_mve_vmaxavq_sv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_s16 (uint16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmaxaq_sv8hi (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_u16 (uint16x8_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_uv8hi (__a, __b);
}

/* Shift-by-immediate forms: __imm must be a compile-time constant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n_u16 (uint16x8_t __a, const int __imm)
{
  return __builtin_mve_vshlq_n_uv8hi (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_n_u16 (uint16x8_t __a, const int __imm)
{
  return __builtin_mve_vrshrq_n_uv8hi (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n_u16 (uint16x8_t __a, const int __imm)
{
  return __builtin_mve_vqshlq_n_uv8hi (__a, __imm);
}
4984
/* Intrinsics operating on 16-bit signed vectors (int16x8_t).  All are
   always-inline forwarders to GCC MVE builtins on the v8hi mode.  */

/* Signed comparisons; each returns a predicate mask (mve_pred16_t).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpneq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpltq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpltq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpleq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpleq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpgtq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpgtq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpgeq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpgeq_n_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcmpeqq_sv8hi (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vcmpeqq_n_sv8hi (__a, __b);
}

/* vqshluq: signed input vector, unsigned result vector, per the
   signature; shift amount is a compile-time immediate.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vqshluq_n_sv8hi (__a, __imm);
}

/* Predicated reduction: the _p variant takes a lane predicate mask.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_sv8hi (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vsubq_sv8hi (__a, __b);
}

/* The _n_ variants take a scalar second operand instead of a vector.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vsubq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vrshlq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vrmulhq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vrhaddq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqsubq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqsubq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqshlq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrshlq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmulhq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqrdmulhq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmulhq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqdmulhq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqaddq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqaddq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vorrq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vornq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmulq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vmulq_n_sv8hi (__a, __b);
}

/* Widening multiplies: 16-bit lane inputs produce a 32-bit lane result
   vector (int32x4_t).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmulltq_int_sv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmullbq_int_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmulhq_sv8hi (__a, __b);
}

/* Multiply-accumulate reductions: vector inputs, scalar int32_t result.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmlsdavxq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmlsdavq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmladavxq_sv8hi (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmladavq_sv8hi (__a, __b);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_s16 (int16_t __a, int16x8_t __b)
{
  return __builtin_mve_vminvq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vminq_sv8hi (__a, __b);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_s16 (int16_t __a, int16x8_t __b)
{
  return __builtin_mve_vmaxvq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmaxq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vhsubq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vhsubq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vhcaddq_rot90_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vhcaddq_rot270_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vhaddq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vhaddq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_veorq_sv8hi (__a, __b);
}

/* NOTE: the vcaddq rotate builtins carry no _s/_u signedness marker in
   their names (mode suffix only).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcaddq_rot90v8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vcaddq_rot270v8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_s16 (int16x8_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vbicq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vandq_sv8hi (__a, __b);
}

/* Accumulating reduction: scalar int32_t accumulator in, scalar out.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_s16 (int32_t __a, int16x8_t __b)
{
  return __builtin_mve_vaddvaq_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vaddq_n_sv8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vabdq_sv8hi (__a, __b);
}

/* Shift-by-immediate forms: __imm must be a compile-time constant.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vshlq_n_sv8hi (__a, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vrshrq_n_sv8hi (__a, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vqshlq_n_sv8hi (__a, __imm);
}
5439
/* VSUB: lane-wise subtract, uint32x4_t.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vsubq_uv4si (__a, __b);
}

/* VSUB (scalar form): subtract scalar __b from every lane.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vsubq_n_uv4si (__a, __b);
}

/* VRMULH: rounding multiply returning the high half of each product.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vrmulhq_uv4si (__a, __b);
}

/* VRHADD: rounding halving add of corresponding lanes.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vrhaddq_uv4si (__a, __b);
}

/* VQSUB: saturating subtract of corresponding lanes.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vqsubq_uv4si (__a, __b);
}

/* VQSUB (scalar form): saturating subtract of scalar __b from every lane.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vqsubq_n_uv4si (__a, __b);
}

/* VQADD: saturating add of corresponding lanes.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vqaddq_uv4si (__a, __b);
}

/* VQADD (scalar form): saturating add of scalar __b to every lane.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vqaddq_n_uv4si (__a, __b);
}

/* VORR: lane-wise bitwise OR.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vorrq_uv4si (__a, __b);
}

/* VORN: bitwise OR of __a with the complement of __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vornq_uv4si (__a, __b);
}

/* VMUL: lane-wise multiply (low half of each product).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmulq_uv4si (__a, __b);
}

/* VMUL (scalar form): multiply every lane by the scalar __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vmulq_n_uv4si (__a, __b);
}
5523
/* VMULLT: long multiply of the top (odd-numbered) lanes, widening the
   32-bit products to 64 bits.  */
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmulltq_int_uv4si (__a, __b);
}

/* VMULLB: long multiply of the bottom (even-numbered) lanes, widening
   the 32-bit products to 64 bits.  */
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmullbq_int_uv4si (__a, __b);
}

/* VMULH: multiply returning the high half of each lane product.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmulhq_uv4si (__a, __b);
}

/* VMLADAV: multiply corresponding lanes and sum the products into a
   scalar.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmladavq_uv4si (__a, __b);
}

/* VMINV: minimum across the lanes of __b, folded with the scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_u32 (uint32_t __a, uint32x4_t __b)
{
  return __builtin_mve_vminvq_uv4si (__a, __b);
}

/* VMIN: lane-wise minimum.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vminq_uv4si (__a, __b);
}

/* VMAXV: maximum across the lanes of __b, folded with the scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_u32 (uint32_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmaxvq_uv4si (__a, __b);
}

/* VMAX: lane-wise maximum.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmaxq_uv4si (__a, __b);
}

/* VHSUB: halving subtract of corresponding lanes.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vhsubq_uv4si (__a, __b);
}

/* VHSUB (scalar form): halving subtract of scalar __b from every lane.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vhsubq_n_uv4si (__a, __b);
}

/* VHADD: halving add of corresponding lanes.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vhaddq_uv4si (__a, __b);
}

/* VHADD (scalar form): halving add of scalar __b to every lane.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vhaddq_n_uv4si (__a, __b);
}

/* VEOR: lane-wise bitwise exclusive OR.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_veorq_uv4si (__a, __b);
}
5614
/* VCMP.NE (scalar form): compare each lane of __a against scalar __b
   for inequality; result is a per-lane predicate mask.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmpneq_n_uv4si (__a, __b);
}

/* VCMP.HI: unsigned "higher" (>) lane-wise compare.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vcmphiq_uv4si (__a, __b);
}

/* VCMP.HI (scalar form): unsigned "higher" compare against scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmphiq_n_uv4si (__a, __b);
}

/* VCMP.EQ: lane-wise equality compare.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vcmpeqq_uv4si (__a, __b);
}

/* VCMP.EQ (scalar form): equality compare against scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmpeqq_n_uv4si (__a, __b);
}

/* VCMP.CS: unsigned "higher or same" (>=) lane-wise compare.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vcmpcsq_uv4si (__a, __b);
}

/* VCMP.CS (scalar form): unsigned "higher or same" compare against
   scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vcmpcsq_n_uv4si (__a, __b);
}
5663
/* VCADD: complex add with 90-degree rotation.  Only a signed builtin
   exists for this operation, so the unsigned variant casts through
   int32x4_t and back; the bit pattern is unchanged.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)
    __builtin_mve_vcaddq_rot90v4si ((int32x4_t)__a, (int32x4_t)__b);
}

/* VCADD: complex add with 270-degree rotation; same signed-builtin
   cast trick as the rot90 variant above.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)
    __builtin_mve_vcaddq_rot270v4si ((int32x4_t)__a, (int32x4_t)__b);
}

/* VBIC: bitwise clear (__a AND NOT __b) on each lane.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vbicq_uv4si (__a, __b);
}

/* VAND: lane-wise bitwise AND.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vandq_uv4si (__a, __b);
}

/* VADDV (predicated): sum the lanes of __a under predicate __p.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_uv4si (__a, __p);
}

/* VADDVA: sum the lanes of __b and accumulate into the scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_u32 (uint32_t __a, uint32x4_t __b)
{
  return __builtin_mve_vaddvaq_uv4si (__a, __b);
}

/* VADD (scalar form): add scalar __b to every lane of __a.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_mve_vaddq_n_uv4si (__a, __b);
}

/* VABD: absolute difference of corresponding lanes.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vabdq_uv4si (__a, __b);
}
5721
/* VSHL (register form): shift every lane by the scalar __b; the shift
   amount is signed, so negative values shift right.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_uv4si (__a, __b);
}

/* VRSHL: rounding shift of each lane by the corresponding signed lane
   of __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrshlq_uv4si (__a, __b);
}

/* VRSHL (register form): rounding shift of every lane by scalar __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_uv4si (__a, __b);
}

/* VQSHL: saturating shift of each lane by the corresponding signed
   lane of __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqshlq_uv4si (__a, __b);
}

/* VQSHL (register form): saturating shift of every lane by scalar __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_uv4si (__a, __b);
}

/* VQRSHL: saturating rounding shift of each lane by the corresponding
   signed lane of __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqrshlq_uv4si (__a, __b);
}

/* VQRSHL (register form): saturating rounding shift of every lane by
   scalar __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_uv4si (__a, __b);
}
5770
/* VMINAV: minimum of the absolute values of __b's lanes, folded with
   the unsigned scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_s32 (uint32_t __a, int32x4_t __b)
{
  return __builtin_mve_vminavq_sv4si (__a, __b);
}

/* VMINA: lane-wise minimum of __a and the absolute value of __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_s32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vminaq_sv4si (__a, __b);
}

/* VMAXAV: maximum of the absolute values of __b's lanes, folded with
   the unsigned scalar __a.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_s32 (uint32_t __a, int32x4_t __b)
{
  return __builtin_mve_vmaxavq_sv4si (__a, __b);
}

/* VMAXA: lane-wise maximum of __a and the absolute value of __b.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_s32 (uint32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmaxaq_sv4si (__a, __b);
}

/* VBRSR: wraps the vbrsrq builtin; __b is a scalar operand applied to
   every uint32 lane of __a.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_u32 (uint32x4_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_uv4si (__a, __b);
}
5805
/* VSHL (immediate): shift each lane left by __imm.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n_u32 (uint32x4_t __a, const int __imm)
{
  return __builtin_mve_vshlq_n_uv4si (__a, __imm);
}

/* VRSHR (immediate): rounding shift right of each lane by __imm.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_n_u32 (uint32x4_t __a, const int __imm)
{
  return __builtin_mve_vrshrq_n_uv4si (__a, __imm);
}

/* VQSHL (immediate): saturating shift left of each lane by __imm.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n_u32 (uint32x4_t __a, const int __imm)
{
  return __builtin_mve_vqshlq_n_uv4si (__a, __imm);
}
5826
/* VCMP.NE (scalar form): compare each lane of __a against scalar __b
   for inequality; result is a per-lane predicate mask.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpneq_n_sv4si (__a, __b);
}

/* VCMP.LT: signed less-than lane-wise compare.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpltq_sv4si (__a, __b);
}

/* VCMP.LT (scalar form): signed less-than compare against scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpltq_n_sv4si (__a, __b);
}

/* VCMP.LE: signed less-than-or-equal lane-wise compare.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpleq_sv4si (__a, __b);
}

/* VCMP.LE (scalar form): signed <= compare against scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpleq_n_sv4si (__a, __b);
}

/* VCMP.GT: signed greater-than lane-wise compare.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpgtq_sv4si (__a, __b);
}

/* VCMP.GT (scalar form): signed greater-than compare against scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpgtq_n_sv4si (__a, __b);
}

/* VCMP.GE: signed greater-than-or-equal lane-wise compare.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpgeq_sv4si (__a, __b);
}

/* VCMP.GE (scalar form): signed >= compare against scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpgeq_n_sv4si (__a, __b);
}

/* VCMP.EQ: lane-wise equality compare.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcmpeqq_sv4si (__a, __b);
}

/* VCMP.EQ (scalar form): equality compare against scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vcmpeqq_n_sv4si (__a, __b);
}
5903
/* VQSHLU (immediate): saturating shift left of signed lanes producing
   an unsigned result.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq_n_s32 (int32x4_t __a, const int __imm)
{
  return __builtin_mve_vqshluq_n_sv4si (__a, __imm);
}

/* VADDV (predicated): sum the lanes of __a under predicate __p.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vaddvq_p_sv4si (__a, __p);
}
5917
/* VSUB: lane-wise subtract, int32x4_t.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vsubq_sv4si (__a, __b);
}

/* VSUB (scalar form): subtract scalar __b from every lane.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vsubq_n_sv4si (__a, __b);
}

/* VSHL (register form): shift every lane by the signed scalar __b;
   negative values shift right.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vshlq_r_sv4si (__a, __b);
}

/* VRSHL: rounding shift of each lane by the corresponding lane of __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrshlq_sv4si (__a, __b);
}

/* VRSHL (register form): rounding shift of every lane by scalar __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vrshlq_n_sv4si (__a, __b);
}

/* VRMULH: rounding multiply returning the high half of each product.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrmulhq_sv4si (__a, __b);
}

/* VRHADD: rounding halving add of corresponding lanes.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrhaddq_sv4si (__a, __b);
}

/* VQSUB: saturating subtract of corresponding lanes.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqsubq_sv4si (__a, __b);
}

/* VQSUB (scalar form): saturating subtract of scalar __b from every
   lane.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqsubq_n_sv4si (__a, __b);
}

/* VQSHL: saturating shift of each lane by the corresponding lane of __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqshlq_sv4si (__a, __b);
}

/* VQSHL (register form): saturating shift of every lane by scalar __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqshlq_r_sv4si (__a, __b);
}

/* VQRSHL: saturating rounding shift of each lane by the corresponding
   lane of __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqrshlq_sv4si (__a, __b);
}

/* VQRSHL (register form): saturating rounding shift of every lane by
   scalar __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqrshlq_n_sv4si (__a, __b);
}
6008
/* VQRDMULH: saturating rounding doubling multiply, returning the high
   half of each product.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqrdmulhq_sv4si (__a, __b);
}

/* VQRDMULH (scalar form): as above with scalar __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqrdmulhq_n_sv4si (__a, __b);
}

/* VQDMULH: saturating doubling multiply, returning the high half of
   each product.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqdmulhq_sv4si (__a, __b);
}

/* VQDMULH (scalar form): as above with scalar __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqdmulhq_n_sv4si (__a, __b);
}

/* VQADD: saturating add of corresponding lanes.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqaddq_sv4si (__a, __b);
}

/* VQADD (scalar form): saturating add of scalar __b to every lane.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqaddq_n_sv4si (__a, __b);
}
6050
/* VORR: lane-wise bitwise OR.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vorrq_sv4si (__a, __b);
}

/* VORN: bitwise OR of __a with the complement of __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vornq_sv4si (__a, __b);
}

/* VMUL: lane-wise multiply (low half of each product).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmulq_sv4si (__a, __b);
}

/* VMUL (scalar form): multiply every lane by the scalar __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vmulq_n_sv4si (__a, __b);
}

/* VMULLT: long multiply of the top (odd-numbered) lanes, widening the
   32-bit products to 64 bits.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmulltq_int_sv4si (__a, __b);
}

/* VMULLB: long multiply of the bottom (even-numbered) lanes, widening
   the 32-bit products to 64 bits.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmullbq_int_sv4si (__a, __b);
}

/* VMULH: multiply returning the high half of each lane product.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmulhq_sv4si (__a, __b);
}
6099
/* VMLSDAVX: multiply-subtract dual accumulate across vector, with the
   lane pairs of __b exchanged.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmlsdavxq_sv4si (__a, __b);
}

/* VMLSDAV: multiply-subtract dual accumulate across vector.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmlsdavq_sv4si (__a, __b);
}

/* VMLADAVX: multiply-accumulate across vector, with the lane pairs of
   __b exchanged.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmladavxq_sv4si (__a, __b);
}

/* VMLADAV: multiply corresponding lanes and sum the products into a
   scalar.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmladavq_sv4si (__a, __b);
}

/* VMINV: minimum across the lanes of __b, folded with the scalar __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_s32 (int32_t __a, int32x4_t __b)
{
  return __builtin_mve_vminvq_sv4si (__a, __b);
}
6134
/* VMIN: lane-wise minimum.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vminq_sv4si (__a, __b);
}

/* VMAXV: maximum across the lanes of __b, folded with the scalar __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_s32 (int32_t __a, int32x4_t __b)
{
  return __builtin_mve_vmaxvq_sv4si (__a, __b);
}

/* VMAX: lane-wise maximum.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmaxq_sv4si (__a, __b);
}
6155
/* VHSUB: halving subtract of corresponding lanes.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vhsubq_sv4si (__a, __b);
}

/* VHSUB (scalar form): halving subtract of scalar __b from every lane.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vhsubq_n_sv4si (__a, __b);
}

/* VHCADD: halving complex add with 90-degree rotation.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vhcaddq_rot90_sv4si (__a, __b);
}

/* VHCADD: halving complex add with 270-degree rotation.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vhcaddq_rot270_sv4si (__a, __b);
}

/* VHADD: halving add of corresponding lanes.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vhaddq_sv4si (__a, __b);
}

/* VHADD (scalar form): halving add of scalar __b to every lane.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vhaddq_n_sv4si (__a, __b);
}
6197
/* VEOR: lane-wise bitwise exclusive OR.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_veorq_sv4si (__a, __b);
}

/* VCADD: complex add with 90-degree rotation, int32x4_t lanes.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcaddq_rot90v4si (__a, __b);
}

/* VCADD: complex add with 270-degree rotation, int32x4_t lanes.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vcaddq_rot270v4si (__a, __b);
}

/* VBRSR: wraps the vbrsrq builtin; __b is a scalar operand applied to
   every int32 lane of __a.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_sv4si (__a, __b);
}

/* VBIC: bitwise clear (__a AND NOT __b) on each lane.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vbicq_sv4si (__a, __b);
}

/* VAND: lane-wise bitwise AND.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vandq_sv4si (__a, __b);
}

/* VADDVA: sum the lanes of __b and accumulate into the scalar __a.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_s32 (int32_t __a, int32x4_t __b)
{
  return __builtin_mve_vaddvaq_sv4si (__a, __b);
}

/* VADD (scalar form): add scalar __b to every lane of __a.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vaddq_n_sv4si (__a, __b);
}
6253
/* VABD: absolute difference of corresponding lanes.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vabdq_sv4si (__a, __b);
}

/* VSHL (immediate): shift each lane left by __imm.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n_s32 (int32x4_t __a, const int __imm)
{
  return __builtin_mve_vshlq_n_sv4si (__a, __imm);
}

/* VRSHR (immediate): rounding shift right of each lane by __imm.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_n_s32 (int32x4_t __a, const int __imm)
{
  return __builtin_mve_vrshrq_n_sv4si (__a, __imm);
}

/* VQSHL (immediate): saturating shift left of each lane by __imm.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n_s32 (int32x4_t __a, const int __imm)
{
  return __builtin_mve_vqshlq_n_sv4si (__a, __imm);
}
6281
/* VQMOVNT: saturating narrow of __b's uint16 lanes into the top
   (odd-numbered) byte lanes of __a; the other lanes keep __a's values.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_u16 (uint8x16_t __a, uint16x8_t __b)
{
  return __builtin_mve_vqmovntq_uv8hi (__a, __b);
}

/* VQMOVNB: saturating narrow of __b's uint16 lanes into the bottom
   (even-numbered) byte lanes of __a.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_u16 (uint8x16_t __a, uint16x8_t __b)
{
  return __builtin_mve_vqmovnbq_uv8hi (__a, __b);
}

/* VMULLT.P8: polynomial (carry-less) long multiply of the top byte
   lanes, widening to 16 bits.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_p8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmulltq_poly_pv16qi (__a, __b);
}

/* VMULLB.P8: polynomial (carry-less) long multiply of the bottom byte
   lanes, widening to 16 bits.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_p8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_mve_vmullbq_poly_pv16qi (__a, __b);
}

/* VMOVNT: truncating narrow of __b's uint16 lanes into the top byte
   lanes of __a.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq_u16 (uint8x16_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmovntq_uv8hi (__a, __b);
}

/* VMOVNB: truncating narrow of __b's uint16 lanes into the bottom byte
   lanes of __a.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq_u16 (uint8x16_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmovnbq_uv8hi (__a, __b);
}

/* VMLALDAV: long multiply-accumulate across vector, producing a 64-bit
   scalar sum of lane products.  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmlaldavq_uv8hi (__a, __b);
}
6330
/* VQMOVUNT: saturating narrow of __b's signed int16 lanes to unsigned
   bytes, written into the top (odd-numbered) byte lanes of __a.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovuntq_s16 (uint8x16_t __a, int16x8_t __b)
{
  return __builtin_mve_vqmovuntq_sv8hi (__a, __b);
}

/* VQMOVUNB: saturating narrow of __b's signed int16 lanes to unsigned
   bytes, written into the bottom (even-numbered) byte lanes of __a.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovunbq_s16 (uint8x16_t __a, int16x8_t __b)
{
  return __builtin_mve_vqmovunbq_sv8hi (__a, __b);
}

/* VSHLLT: widening shift left by __imm of the top byte lanes,
   producing uint16 lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_n_u8 (uint8x16_t __a, const int __imm)
{
  return __builtin_mve_vshlltq_n_uv16qi (__a, __imm);
}

/* VSHLLB: widening shift left by __imm of the bottom byte lanes,
   producing uint16 lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_n_u8 (uint8x16_t __a, const int __imm)
{
  return __builtin_mve_vshllbq_n_uv16qi (__a, __imm);
}

/* VORR (immediate): bitwise OR of each lane with the immediate __imm.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_n_u16 (uint16x8_t __a, const int __imm)
{
  return __builtin_mve_vorrq_n_uv8hi (__a, __imm);
}

/* VBIC (immediate): bitwise clear of each lane with the immediate
   __imm.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_n_u16 (uint16x8_t __a, const int __imm)
{
  return __builtin_mve_vbicq_n_uv8hi (__a, __imm);
}
6372
/* Signed 16-bit counterparts: saturating/truncating narrows, saturating
   doubling widening multiplies, and the long multiply-accumulate-across
   family.  Pure builtin forwarders, as above.  */

/* VQMOVNT.S16: saturating narrow into the top byte lanes.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_s16 (int8x16_t __a, int16x8_t __b)
{
  return __builtin_mve_vqmovntq_sv8hi (__a, __b);
}

/* VQMOVNB.S16: saturating narrow into the bottom byte lanes.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_s16 (int8x16_t __a, int16x8_t __b)
{
  return __builtin_mve_vqmovnbq_sv8hi (__a, __b);
}

/* VQDMULLT.S16: saturating doubling multiply of the top s16 lanes, each
   product widened to 32 bits.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmulltq_sv8hi (__a, __b);
}

/* VQDMULLT.S16 (scalar operand): every lane multiplied by scalar __b.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqdmulltq_n_sv8hi (__a, __b);
}

/* VQDMULLB.S16: as vqdmulltq_s16, using the bottom lanes.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmullbq_sv8hi (__a, __b);
}

/* VQDMULLB.S16 (scalar operand).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_mve_vqdmullbq_n_sv8hi (__a, __b);
}

/* VMOVNT.I16: truncating narrow into the top byte lanes.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq_s16 (int8x16_t __a, int16x8_t __b)
{
  return __builtin_mve_vmovntq_sv8hi (__a, __b);
}

/* VMOVNB.I16: truncating narrow into the bottom byte lanes.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq_s16 (int8x16_t __a, int16x8_t __b)
{
  return __builtin_mve_vmovnbq_sv8hi (__a, __b);
}

/* VMLSLDAVX.S16: exchanged multiply-subtract, lane products accumulated
   into one 64-bit scalar.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavxq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmlsldavxq_sv8hi (__a, __b);
}

/* VMLSLDAV.S16: multiply-subtract long dual accumulate across vector.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmlsldavq_sv8hi (__a, __b);
}

/* VMLALDAVX.S16: exchanged multiply-add long dual accumulate across.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavxq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmlaldavxq_sv8hi (__a, __b);
}

/* VMLALDAV.S16: sum of lane products as a 64-bit scalar.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vmlaldavq_sv8hi (__a, __b);
}
6456
/* VSHLLT.S8: sign-extend the top s8 lanes to s16, shifting left by __imm.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_n_s8 (int8x16_t __a, const int __imm)
{
  return __builtin_mve_vshlltq_n_sv16qi (__a, __imm);
}

/* VSHLLB.S8: as above for the bottom lanes.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_n_s8 (int8x16_t __a, const int __imm)
{
  return __builtin_mve_vshllbq_n_sv16qi (__a, __imm);
}

/* VORR.I16 (immediate form), signed element type variant.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vorrq_n_sv8hi (__a, __imm);
}

/* VBIC.I16 (immediate form), signed element type variant.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vbicq_n_sv8hi (__a, __imm);
}
6484
/* 32-bit unsigned counterparts of the narrowing group above, plus the
   16-bit polynomial multiplies.  */

/* VQMOVNT.U32: saturate each u32 lane of __b to u16, top halfword lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_u32 (uint16x8_t __a, uint32x4_t __b)
{
  return __builtin_mve_vqmovntq_uv4si (__a, __b);
}

/* VQMOVNB.U32: as above, bottom halfword lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_u32 (uint16x8_t __a, uint32x4_t __b)
{
  return __builtin_mve_vqmovnbq_uv4si (__a, __b);
}

/* VMULLT.P16: polynomial multiply of the top p16 lanes, widening to 32.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_p16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmulltq_poly_pv8hi (__a, __b);
}

/* VMULLB.P16: polynomial multiply of the bottom p16 lanes, widening.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_p16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_mve_vmullbq_poly_pv8hi (__a, __b);
}

/* VMOVNT.I32: truncating narrow into the top halfword lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq_u32 (uint16x8_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmovntq_uv4si (__a, __b);
}

/* VMOVNB.I32: truncating narrow into the bottom halfword lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq_u32 (uint16x8_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmovnbq_uv4si (__a, __b);
}

/* VMLALDAV.U32: sum of u32 lane products as a 64-bit scalar.  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vmlaldavq_uv4si (__a, __b);
}
6533
/* VQMOVUNT.S32: saturate signed 32-bit lanes to unsigned 16 bits, top
   halfword lanes of __a.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovuntq_s32 (uint16x8_t __a, int32x4_t __b)
{
  return __builtin_mve_vqmovuntq_sv4si (__a, __b);
}

/* VQMOVUNB.S32: as above, bottom halfword lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovunbq_s32 (uint16x8_t __a, int32x4_t __b)
{
  return __builtin_mve_vqmovunbq_sv4si (__a, __b);
}

/* VSHLLT.U16: widen the top u16 lanes to u32, shifting left by __imm.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_n_u16 (uint16x8_t __a, const int __imm)
{
  return __builtin_mve_vshlltq_n_uv8hi (__a, __imm);
}

/* VSHLLB.U16: as above for the bottom lanes.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_n_u16 (uint16x8_t __a, const int __imm)
{
  return __builtin_mve_vshllbq_n_uv8hi (__a, __imm);
}

/* VORR.I32 (immediate form).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_n_u32 (uint32x4_t __a, const int __imm)
{
  return __builtin_mve_vorrq_n_uv4si (__a, __imm);
}

/* VBIC.I32 (immediate form).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_n_u32 (uint32x4_t __a, const int __imm)
{
  return __builtin_mve_vbicq_n_uv4si (__a, __imm);
}
6575
/* Signed 32-bit counterparts: narrows, doubling widening multiplies, and
   the long multiply-accumulate-across family.  */

/* VQMOVNT.S32: saturating narrow into the top halfword lanes.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_s32 (int16x8_t __a, int32x4_t __b)
{
  return __builtin_mve_vqmovntq_sv4si (__a, __b);
}

/* VQMOVNB.S32: saturating narrow into the bottom halfword lanes.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_s32 (int16x8_t __a, int32x4_t __b)
{
  return __builtin_mve_vqmovnbq_sv4si (__a, __b);
}

/* VQDMULLT.S32: saturating doubling multiply of the top s32 lanes, each
   product widened to 64 bits.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqdmulltq_sv4si (__a, __b);
}

/* VQDMULLT.S32 (scalar operand).  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqdmulltq_n_sv4si (__a, __b);
}

/* VQDMULLB.S32: bottom-lane variant.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vqdmullbq_sv4si (__a, __b);
}

/* VQDMULLB.S32 (scalar operand).  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_mve_vqdmullbq_n_sv4si (__a, __b);
}

/* VMOVNT.I32: truncating narrow into the top halfword lanes.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq_s32 (int16x8_t __a, int32x4_t __b)
{
  return __builtin_mve_vmovntq_sv4si (__a, __b);
}

/* VMOVNB.I32: truncating narrow into the bottom halfword lanes.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq_s32 (int16x8_t __a, int32x4_t __b)
{
  return __builtin_mve_vmovnbq_sv4si (__a, __b);
}

/* VMLSLDAVX.S32: exchanged multiply-subtract long dual accumulate.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavxq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmlsldavxq_sv4si (__a, __b);
}

/* VMLSLDAV.S32: multiply-subtract long dual accumulate across vector.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmlsldavq_sv4si (__a, __b);
}

/* VMLALDAVX.S32: exchanged multiply-add long dual accumulate across.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavxq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmlaldavxq_sv4si (__a, __b);
}

/* VMLALDAV.S32: sum of lane products as a 64-bit scalar.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vmlaldavq_sv4si (__a, __b);
}
6659
/* VSHLLT.S16: sign-extend the top s16 lanes to s32, shifting left by __imm.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vshlltq_n_sv8hi (__a, __imm);
}

/* VSHLLB.S16: as above for the bottom lanes.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_n_s16 (int16x8_t __a, const int __imm)
{
  return __builtin_mve_vshllbq_n_sv8hi (__a, __imm);
}

/* VORR.I32 (immediate form), signed element type variant.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_n_s32 (int32x4_t __a, const int __imm)
{
  return __builtin_mve_vorrq_n_sv4si (__a, __imm);
}

/* VBIC.I32 (immediate form), signed element type variant.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_n_s32 (int32x4_t __a, const int __imm)
{
  return __builtin_mve_vbicq_n_sv4si (__a, __imm);
}
6687
/* VRMLALDAVH.U32: rounding long multiply-accumulate across vector,
   returning the high part of the accumulation as a 64-bit scalar.  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_mve_vrmlaldavhq_uv4si (__a, __b);
}

/* Predicated VCTP: create a tail predicate whose first __a lanes are
   active, combined with the incoming predicate __p.  One wrapper per
   element size (8/16/32/64 bits).  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp8q_m (uint32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vctp8q_mhi (__a, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp64q_m (uint32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vctp64q_mhi (__a, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp32q_m (uint32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vctp32q_mhi (__a, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vctp16q_m (uint32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vctp16q_mhi (__a, __p);
}
6722
/* VADDLVA.U32: add all u32 lanes of __b onto the 64-bit accumulator __a.  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvaq_u32 (uint64_t __a, uint32x4_t __b)
{
  return __builtin_mve_vaddlvaq_uv4si (__a, __b);
}

/* VRMLSLDAVHX.S32: rounding, exchanged multiply-subtract variant of the
   high long dual accumulate across vector.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhxq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrmlsldavhxq_sv4si (__a, __b);
}

/* VRMLSLDAVH.S32: rounding multiply-subtract variant.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrmlsldavhq_sv4si (__a, __b);
}

/* VRMLALDAVHX.S32: rounding, exchanged multiply-add variant.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhxq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrmlaldavhxq_sv4si (__a, __b);
}

/* VRMLALDAVH.S32: rounding multiply-add, signed variant.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_mve_vrmlaldavhq_sv4si (__a, __b);
}

/* VADDLVA.S32: signed add-across onto a 64-bit accumulator.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvaq_s32 (int64_t __a, int32x4_t __b)
{
  return __builtin_mve_vaddlvaq_sv4si (__a, __b);
}
6764
/* VABAV: absolute-difference-and-accumulate across vector — sums the
   per-lane |__b - __c| into the 32-bit scalar accumulator __a.  */

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_s8 (uint32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_mve_vabavq_sv16qi (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_s16 (uint32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_mve_vabavq_sv8hi (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_s32 (uint32_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_mve_vabavq_sv4si (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return __builtin_mve_vabavq_uv16qi(__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __builtin_mve_vabavq_uv8hi(__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __builtin_mve_vabavq_uv4si(__a, __b, __c);
}
6806
/* Predicated VBIC immediate: clear the immediate's bits in each lane for
   which the predicate __p is active; inactive lanes keep __a's value.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_n_sv8hi (__a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_n_sv4si (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_n_uv8hi (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_n_uv4si (__a, __imm, __p);
}
6834
/* VQRSHRNB: saturating rounding shift-right-narrow of __b by __imm,
   written into the bottom lanes of __a.  The VQRSHRUNB forms narrow a
   signed source to an unsigned result.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vqrshrnbq_n_sv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vqrshrnbq_n_uv8hi (__a, __b, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshrnbq_n_sv4si (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshrnbq_n_uv4si (__a, __b, __imm);
}

/* VQRSHRUNB.S16: signed source, unsigned saturated result.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrunbq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vqrshrunbq_n_sv8hi (__a, __b, __imm);
}

/* VQRSHRUNB.S32: signed source, unsigned saturated result.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrunbq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshrunbq_n_sv4si (__a, __b, __imm);
}
6876
/* VRMLALDAVHA: accumulating form of VRMLALDAVH — the rounding long
   multiply-accumulate across vector is added onto the incoming 64-bit
   scalar accumulator __a.  */

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_mve_vrmlaldavhaq_sv4si (__a, __b, __c);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaq_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __builtin_mve_vrmlaldavhaq_uv4si (__a, __b, __c);
}
6890
6891 __extension__ extern __inline int8x16_t
6892 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6893 __arm_vshlcq_s8 (int8x16_t __a, uint32_t * __b, const int __imm)
6894 {
6895 int8x16_t __res = __builtin_mve_vshlcq_vec_sv16qi (__a, *__b, __imm);
6896 *__b = __builtin_mve_vshlcq_carry_sv16qi (__a, *__b, __imm);
6897 return __res;
6898 }
6899
6900 __extension__ extern __inline uint8x16_t
6901 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6902 __arm_vshlcq_u8 (uint8x16_t __a, uint32_t * __b, const int __imm)
6903 {
6904 uint8x16_t __res = __builtin_mve_vshlcq_vec_uv16qi (__a, *__b, __imm);
6905 *__b = __builtin_mve_vshlcq_carry_uv16qi (__a, *__b, __imm);
6906 return __res;
6907 }
6908
6909 __extension__ extern __inline int16x8_t
6910 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6911 __arm_vshlcq_s16 (int16x8_t __a, uint32_t * __b, const int __imm)
6912 {
6913 int16x8_t __res = __builtin_mve_vshlcq_vec_sv8hi (__a, *__b, __imm);
6914 *__b = __builtin_mve_vshlcq_carry_sv8hi (__a, *__b, __imm);
6915 return __res;
6916 }
6917
6918 __extension__ extern __inline uint16x8_t
6919 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6920 __arm_vshlcq_u16 (uint16x8_t __a, uint32_t * __b, const int __imm)
6921 {
6922 uint16x8_t __res = __builtin_mve_vshlcq_vec_uv8hi (__a, *__b, __imm);
6923 *__b = __builtin_mve_vshlcq_carry_uv8hi (__a, *__b, __imm);
6924 return __res;
6925 }
6926
6927 __extension__ extern __inline int32x4_t
6928 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6929 __arm_vshlcq_s32 (int32x4_t __a, uint32_t * __b, const int __imm)
6930 {
6931 int32x4_t __res = __builtin_mve_vshlcq_vec_sv4si (__a, *__b, __imm);
6932 *__b = __builtin_mve_vshlcq_carry_sv4si (__a, *__b, __imm);
6933 return __res;
6934 }
6935
6936 __extension__ extern __inline uint32x4_t
6937 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6938 __arm_vshlcq_u32 (uint32x4_t __a, uint32_t * __b, const int __imm)
6939 {
6940 uint32x4_t __res = __builtin_mve_vshlcq_vec_uv4si (__a, *__b, __imm);
6941 *__b = __builtin_mve_vshlcq_carry_uv4si (__a, *__b, __imm);
6942 return __res;
6943 }
6944
/* VPSEL: per-lane select — take __a's lane where the predicate bit is
   set, otherwise __b's lane.  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_uv16qi (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_sv16qi (__a, __b, __p);
}
6958
/* Predicated VREV64.8: reverse the byte order within each 64-bit chunk of
   __a for active lanes; inactive lanes come from __inactive.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_uv16qi (__inactive, __a, __p);
}

/* Predicated VMVN: bitwise NOT of __a for active lanes.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_uv16qi (__inactive, __a, __p);
}

/* VMLAS: per-lane __a * __b plus the scalar __c.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
{
  return __builtin_mve_vmlasq_n_uv16qi (__a, __b, __c);
}

/* VMLA: per-lane __a plus __b * scalar __c.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
{
  return __builtin_mve_vmlaq_n_uv16qi (__a, __b, __c);
}
6986
/* Predicated VMLADAV.U8: sum of lane products over active lanes only.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavq_p_uv16qi (__a, __b, __p);
}

/* VMLADAVA.U8: sum of lane products added onto the scalar accumulator.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return __builtin_mve_vmladavaq_uv16qi (__a, __b, __c);
}

/* Predicated VMINV.U8: minimum across active lanes, seeded with __a.  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p_u8 (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminvq_p_uv16qi (__a, __b, __p);
}

/* Predicated VMAXV.U8: maximum across active lanes, seeded with __a.  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p_u8 (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxvq_p_uv16qi (__a, __b, __p);
}

/* Predicated VDUP.8: broadcast the scalar into active lanes only.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_u8 (uint8x16_t __inactive, uint8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_uv16qi (__inactive, __a, __p);
}
7021
/* Predicated unsigned 8-bit compares.  Each produces a lane predicate
   combined with the incoming predicate __p.  Condition suffixes follow
   the instruction set: eq = equal, ne = not equal, cs = unsigned >=,
   hi = unsigned >.  The _n_ forms compare against a broadcast scalar.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_uv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_uv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmphiq_m_uv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmphiq_m_n_uv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_uv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_uv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpcsq_m_uv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpcsq_m_n_uv16qi (__a, __b, __p);
}
7077
/* Predicated VCLZ.8: count leading zeros per active lane; inactive lanes
   come from __inactive.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_uv16qi (__inactive, __a, __p);
}

/* Predicated VADDVA.U8: add active lanes of __b onto the accumulator.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p_u8 (uint32_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddvaq_p_uv16qi (__a, __b, __p);
}

/* VSRI.8: shift __b right by __imm and insert into __a (top bits kept).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm)
{
  return __builtin_mve_vsriq_n_uv16qi (__a, __b, __imm);
}

/* VSLI.8: shift __b left by __imm and insert into __a (low bits kept).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm)
{
  return __builtin_mve_vsliq_n_uv16qi (__a, __b, __imm);
}

/* Predicated VSHL with a runtime scalar shift amount in __b.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_r_uv16qi (__a, __b, __p);
}

/* Predicated rounding VRSHL.  Note: despite the _n suffix the shift
   amount __b is a runtime scalar, not a compile-time immediate.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_n_uv16qi (__a, __b, __p);
}

/* Predicated saturating VQSHL with a runtime scalar shift amount.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_r_uv16qi (__a, __b, __p);
}

/* Predicated saturating rounding VQRSHL, runtime scalar shift amount.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_n_uv16qi (__a, __b, __p);
}
7133
/* Absolute min/max family on signed input, unsigned result: the 'a'
   variants operate on the absolute values of the signed lanes.  */

/* Predicated VMINAV.S8: unsigned min of __a and |lane| over active lanes.  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminavq_p_sv16qi (__a, __b, __p);
}

/* Predicated VMINA.S8: per-lane unsigned min of __a and |__b|.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_m_s8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminaq_m_sv16qi (__a, __b, __p);
}

/* Predicated VMAXAV.S8: unsigned max of __a and |lane| over active lanes.  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxavq_p_sv16qi (__a, __b, __p);
}

/* Predicated VMAXA.S8: per-lane unsigned max of __a and |__b|.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_m_s8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxaq_m_sv16qi (__a, __b, __p);
}
7161
/* Predicated signed 8-bit compares; result predicate is combined with the
   incoming predicate __p.  Condition suffixes: eq, ne, lt, le, gt, ge
   (signed).  The _n_ forms compare against a broadcast scalar.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_n_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_n_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_sv16qi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_sv16qi (__a, __b, __p);
}
7245
/* Predicated shift and unary intrinsics for signed 8-bit vectors:
   shift-by-register (_r) and shift-by-scalar (_n) forms taking an
   int32_t shift count, plus vrev64, saturating negate/abs and plain
   negate.  Where present, __inactive is the merge source for lanes not
   selected by __p (MVE ACLE _m convention).  All forward directly to
   the corresponding builtin.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_r_sv16qi (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_n_sv16qi (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_sv16qi (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_r_sv16qi (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_n_sv16qi (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vqnegq_m_sv16qi (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vqabsq_m_sv16qi (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv16qi (__inactive, __a, __p);
}
7301
7302
/* Predicated bitwise-not and reduction intrinsics for signed 8-bit
   vectors.  The vml*davq_p family accumulates into an int32_t result;
   vminvq_p/vmaxvq_p carry an int8_t scalar accumulator __a through the
   reduction.  All are thin wrappers around the matching builtin, gated
   by the predicate __p.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv16qi (__inactive, __a, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavxq_p_sv16qi (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavq_p_sv16qi (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavxq_p_sv16qi (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavq_p_sv16qi (__a, __b, __p);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p_s8 (int8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminvq_p_sv16qi (__a, __b, __p);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p_s8 (int8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxvq_p_sv16qi (__a, __b, __p);
}
7351
/* Predicated duplicate, count-leading-zeros/sign-bits, accumulating
   add-across (int32_t accumulator __a) and absolute-value intrinsics
   for signed 8-bit vectors.  __inactive, where present, is the merge
   source for unselected lanes.  Each forwards to its builtin.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_s8 (int8x16_t __inactive, int8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_sv16qi (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_sv16qi (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclsq_m_sv16qi (__inactive, __a, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p_s8 (int32_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddvaq_p_sv16qi (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_sv16qi (__inactive, __a, __p);
}
7386
/* Unpredicated saturating (rounding) doubling multiply-accumulate
   intrinsics for signed 8-bit vectors (vq[r]dml{a,s}dh[x]q and the
   scalar-operand vq[r]dmla[s]hq_n forms).  Three-operand wrappers:
   either (__inactive, __a, __b) vector triples or (__a, __b, scalar
   __c); each forwards unchanged to the matching builtin.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqrdmlsdhxq_sv16qi (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqrdmlsdhq_sv16qi (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __builtin_mve_vqrdmlashq_n_sv16qi (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __builtin_mve_vqdmlashq_n_sv16qi (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __builtin_mve_vqrdmlahq_n_sv16qi (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqrdmladhxq_sv16qi (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqrdmladhq_sv16qi (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqdmlsdhxq_sv16qi (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqdmlsdhq_sv16qi (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __builtin_mve_vqdmlahq_n_sv16qi (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqdmladhxq_sv16qi (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __builtin_mve_vqdmladhq_sv16qi (__inactive, __a, __b);
}
7470
/* Unpredicated multiply-accumulate intrinsics for signed 8-bit
   vectors: the vml*davaq family accumulates into an int32_t __a; the
   vmla[s]q_n forms take a scalar int8_t __c; vsriq_n/vsliq_n are
   shift-right/left-and-insert with a compile-time immediate __imm.
   Each forwards to its builtin.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_mve_vmlsdavaxq_sv16qi (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_mve_vmlsdavaq_sv16qi (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __builtin_mve_vmlasq_n_sv16qi (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __builtin_mve_vmlaq_n_sv16qi (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_mve_vmladavaxq_sv16qi (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_mve_vmladavaq_sv16qi (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm)
{
  return __builtin_mve_vsriq_n_sv16qi (__a, __b, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm)
{
  return __builtin_mve_vsliq_n_sv16qi (__a, __b, __imm);
}
7526
/* Predicate-select (vpsel) for 16-bit vectors, plus predicated vrev64
   and bitwise-not for the unsigned 16-bit type.  vpselq picks between
   __a and __b per lane according to __p; all four forward to the
   matching v8hi builtin.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_uv8hi (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_sv8hi (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_uv8hi (__inactive, __a, __p);
}
7554
/* Unsigned 16-bit multiply-accumulate (scalar __c), dot-product
   reductions into a uint32_t accumulator, predicated min/max
   reductions carrying a uint16_t scalar __a, and predicated scalar
   duplicate.  Thin wrappers over the matching uv8hi builtins.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return __builtin_mve_vmlasq_n_uv8hi (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return __builtin_mve_vmlaq_n_uv8hi (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavq_p_uv8hi (__a, __b, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __builtin_mve_vmladavaq_uv8hi (__a, __b, __c);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p_u16 (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminvq_p_uv8hi (__a, __b, __p);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p_u16 (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxvq_p_uv8hi (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_u16 (uint16x8_t __inactive, uint16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_uv8hi (__inactive, __a, __p);
}
7603
/* Predicated (_m) vector compare intrinsics for unsigned 16-bit
   vectors: ne, hi, eq, cs.  Each returns an mve_pred16_t result
   predicate; the _n_ variants compare against a scalar uint16_t __b.
   All forward to the matching __builtin_mve_vcmp*q_m_uv8hi builtin.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_uv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_uv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmphiq_m_uv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmphiq_m_n_uv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_uv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_uv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpcsq_m_uv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpcsq_m_n_uv8hi (__a, __b, __p);
}
7659
/* Unsigned 16-bit: predicated count-leading-zeros, accumulating
   add-across (uint32_t accumulator), shift-and-insert with immediate
   __imm, and predicated shifts taking an int32_t shift count.  All
   forward to the matching uv8hi builtin.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p_u16 (uint32_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddvaq_p_uv8hi (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vsriq_n_uv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vsliq_n_uv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_r_uv8hi (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_n_uv8hi (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_r_uv8hi (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_n_uv8hi (__a, __b, __p);
}
7715
/* Predicated min/max-absolute intrinsics on signed 16-bit input: the
   *avq_p forms reduce to a uint16_t scalar, the *aq_m forms operate
   lane-wise on a uint16x8_t accumulator.  All forward to the matching
   sv8hi builtin under the predicate __p.  */
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminavq_p_sv8hi (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_m_s16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminaq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxavq_p_sv8hi (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_m_s16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxaq_m_sv8hi (__a, __b, __p);
}
7743
/* Predicated (_m) vector compare intrinsics for signed 16-bit vectors:
   ne, lt, le, gt, ge, eq.  Each returns an mve_pred16_t result
   predicate; the _n_ variants take a scalar int16_t second operand.
   All forward to the matching __builtin_mve_vcmp*q_m_sv8hi builtin.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_n_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_n_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_sv8hi (__a, __b, __p);
}
7827
/* Predicated shift and unary intrinsics for signed 16-bit vectors
   (shift forms take an int32_t count; vrev64, saturating negate/abs,
   negate, bitwise-not).  __inactive, where present, is the merge
   source for lanes not selected by __p.  All forward to the matching
   sv8hi builtin.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_r_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_n_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqshlq_m_r_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrshlq_m_n_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vqnegq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vqabsq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv8hi (__inactive, __a, __p);
}
7890
/* Predicated reduction and lane-wise intrinsics for signed 16-bit
   vectors: the vml*davq_p family accumulates into an int32_t result;
   vminvq_p/vmaxvq_p carry an int16_t scalar accumulator; plus
   predicated duplicate, clz/cls, accumulating add-across and absolute
   value.  All forward to the matching sv8hi builtin.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavxq_p_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavq_p_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavxq_p_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmladavq_p_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p_s16 (int16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminvq_p_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p_s16 (int16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxvq_p_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_s16 (int16x8_t __inactive, int16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclsq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p_s16 (int32_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddvaq_p_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_sv8hi (__inactive, __a, __p);
}
7967
/* Unpredicated saturating (rounding) doubling multiply-accumulate
   intrinsics for signed 16-bit vectors (vq[r]dml{a,s}dh[x]q and the
   scalar-operand vq[r]dmla[s]hq_n forms).  Either (__inactive, __a,
   __b) vector triples or (__a, __b, scalar __c); each forwards
   unchanged to the matching sv8hi builtin.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmlsdhxq_sv8hi (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmlsdhq_sv8hi (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_mve_vqrdmlashq_n_sv8hi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_mve_vqdmlashq_n_sv8hi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_mve_vqrdmlahq_n_sv8hi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmladhxq_sv8hi (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqrdmladhq_sv8hi (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmlsdhxq_sv8hi (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmlsdhq_sv8hi (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_mve_vqdmlahq_n_sv8hi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmladhxq_sv8hi (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __builtin_mve_vqdmladhq_sv8hi (__inactive, __a, __b);
}
8051
/* int16x8_t multiply accumulate intrinsics: dot-product style
   reductions accumulating into a scalar int32_t (vmlsdava*/vmladava*),
   vector multiply-add with a scalar operand (vmlasq/vmlaq), and
   shift-right/left-and-insert with an immediate shift count
   (vsriq/vsliq).  Each wrapper forwards to the V8HI-mode builtin.  */
8052 __extension__ extern __inline int32_t
8053 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8054 __arm_vmlsdavaxq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
8055 {
8056 return __builtin_mve_vmlsdavaxq_sv8hi (__a, __b, __c);
8057 }
8058
8059 __extension__ extern __inline int32_t
8060 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8061 __arm_vmlsdavaq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
8062 {
8063 return __builtin_mve_vmlsdavaq_sv8hi (__a, __b, __c);
8064 }
8065
8066 __extension__ extern __inline int16x8_t
8067 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8068 __arm_vmlasq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
8069 {
8070 return __builtin_mve_vmlasq_n_sv8hi (__a, __b, __c);
8071 }
8072
8073 __extension__ extern __inline int16x8_t
8074 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8075 __arm_vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
8076 {
8077 return __builtin_mve_vmlaq_n_sv8hi (__a, __b, __c);
8078 }
8079
8080 __extension__ extern __inline int32_t
8081 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8082 __arm_vmladavaxq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
8083 {
8084 return __builtin_mve_vmladavaxq_sv8hi (__a, __b, __c);
8085 }
8086
8087 __extension__ extern __inline int32_t
8088 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8089 __arm_vmladavaq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
8090 {
8091 return __builtin_mve_vmladavaq_sv8hi (__a, __b, __c);
8092 }
8093
8094 __extension__ extern __inline int16x8_t
8095 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8096 __arm_vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm)
8097 {
8098 return __builtin_mve_vsriq_n_sv8hi (__a, __b, __imm);
8099 }
8100
8101 __extension__ extern __inline int16x8_t
8102 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8103 __arm_vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm)
8104 {
8105 return __builtin_mve_vsliq_n_sv8hi (__a, __b, __imm);
8106 }
8107
/* 32-bit-lane predicated intrinsics.  vpselq selects between two
   vectors under a 16-bit predicate; the _m forms take the predicate
   last and (for the "inactive" forms) merge with an __inactive vector;
   the _p forms are predicated reductions.  "uv4si"/"sv4si" name the
   unsigned/signed V4SI (4 x 32-bit) vector modes of the builtins.
   Comparison wrappers return an mve_pred16_t predicate mask.  */
8108 __extension__ extern __inline uint32x4_t
8109 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8110 __arm_vpselq_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
8111 {
8112 return __builtin_mve_vpselq_uv4si (__a, __b, __p);
8113 }
8114
8115 __extension__ extern __inline int32x4_t
8116 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8117 __arm_vpselq_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8118 {
8119 return __builtin_mve_vpselq_sv4si (__a, __b, __p);
8120 }
8121
8122 __extension__ extern __inline uint32x4_t
8123 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8124 __arm_vrev64q_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
8125 {
8126 return __builtin_mve_vrev64q_m_uv4si (__inactive, __a, __p);
8127 }
8128
8129 __extension__ extern __inline uint32x4_t
8130 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8131 __arm_vmvnq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
8132 {
8133 return __builtin_mve_vmvnq_m_uv4si (__inactive, __a, __p);
8134 }
8135
8136 __extension__ extern __inline uint32x4_t
8137 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8138 __arm_vmlasq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
8139 {
8140 return __builtin_mve_vmlasq_n_uv4si (__a, __b, __c);
8141 }
8142
8143 __extension__ extern __inline uint32x4_t
8144 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8145 __arm_vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
8146 {
8147 return __builtin_mve_vmlaq_n_uv4si (__a, __b, __c);
8148 }
8149
8150 __extension__ extern __inline uint32_t
8151 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8152 __arm_vmladavq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
8153 {
8154 return __builtin_mve_vmladavq_p_uv4si (__a, __b, __p);
8155 }
8156
8157 __extension__ extern __inline uint32_t
8158 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8159 __arm_vmladavaq_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
8160 {
8161 return __builtin_mve_vmladavaq_uv4si (__a, __b, __c);
8162 }
8163
8164 __extension__ extern __inline uint32_t
8165 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8166 __arm_vminvq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
8167 {
8168 return __builtin_mve_vminvq_p_uv4si (__a, __b, __p);
8169 }
8170
8171 __extension__ extern __inline uint32_t
8172 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8173 __arm_vmaxvq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
8174 {
8175 return __builtin_mve_vmaxvq_p_uv4si (__a, __b, __p);
8176 }
8177
8178 __extension__ extern __inline uint32x4_t
8179 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8180 __arm_vdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, mve_pred16_t __p)
8181 {
8182 return __builtin_mve_vdupq_m_n_uv4si (__inactive, __a, __p);
8183 }
8184
8185 __extension__ extern __inline mve_pred16_t
8186 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8187 __arm_vcmpneq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
8188 {
8189 return __builtin_mve_vcmpneq_m_uv4si (__a, __b, __p);
8190 }
8191
8192 __extension__ extern __inline mve_pred16_t
8193 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8194 __arm_vcmpneq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
8195 {
8196 return __builtin_mve_vcmpneq_m_n_uv4si (__a, __b, __p);
8197 }
8198
8199 __extension__ extern __inline mve_pred16_t
8200 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8201 __arm_vcmphiq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
8202 {
8203 return __builtin_mve_vcmphiq_m_uv4si (__a, __b, __p);
8204 }
8205
8206 __extension__ extern __inline mve_pred16_t
8207 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8208 __arm_vcmphiq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
8209 {
8210 return __builtin_mve_vcmphiq_m_n_uv4si (__a, __b, __p);
8211 }
8212
8213 __extension__ extern __inline mve_pred16_t
8214 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8215 __arm_vcmpeqq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
8216 {
8217 return __builtin_mve_vcmpeqq_m_uv4si (__a, __b, __p);
8218 }
8219
8220 __extension__ extern __inline mve_pred16_t
8221 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8222 __arm_vcmpeqq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
8223 {
8224 return __builtin_mve_vcmpeqq_m_n_uv4si (__a, __b, __p);
8225 }
8226
8227 __extension__ extern __inline mve_pred16_t
8228 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8229 __arm_vcmpcsq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
8230 {
8231 return __builtin_mve_vcmpcsq_m_uv4si (__a, __b, __p);
8232 }
8233
8234 __extension__ extern __inline mve_pred16_t
8235 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8236 __arm_vcmpcsq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
8237 {
8238 return __builtin_mve_vcmpcsq_m_n_uv4si (__a, __b, __p);
8239 }
8240
8241 __extension__ extern __inline uint32x4_t
8242 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8243 __arm_vclzq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
8244 {
8245 return __builtin_mve_vclzq_m_uv4si (__inactive, __a, __p);
8246 }
8247
8248 __extension__ extern __inline uint32_t
8249 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8250 __arm_vaddvaq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
8251 {
8252 return __builtin_mve_vaddvaq_p_uv4si (__a, __b, __p);
8253 }
8254
8255 __extension__ extern __inline uint32x4_t
8256 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8257 __arm_vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm)
8258 {
8259 return __builtin_mve_vsriq_n_uv4si (__a, __b, __imm);
8260 }
8261
8262 __extension__ extern __inline uint32x4_t
8263 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8264 __arm_vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm)
8265 {
8266 return __builtin_mve_vsliq_n_uv4si (__a, __b, __imm);
8267 }
8268
8269 __extension__ extern __inline uint32x4_t
8270 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8271 __arm_vshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
8272 {
8273 return __builtin_mve_vshlq_m_r_uv4si (__a, __b, __p);
8274 }
8275
8276 __extension__ extern __inline uint32x4_t
8277 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8278 __arm_vrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
8279 {
8280 return __builtin_mve_vrshlq_m_n_uv4si (__a, __b, __p);
8281 }
8282
8283 __extension__ extern __inline uint32x4_t
8284 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8285 __arm_vqshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
8286 {
8287 return __builtin_mve_vqshlq_m_r_uv4si (__a, __b, __p);
8288 }
8289
8290 __extension__ extern __inline uint32x4_t
8291 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8292 __arm_vqrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
8293 {
8294 return __builtin_mve_vqrshlq_m_n_uv4si (__a, __b, __p);
8295 }
8296
/* Predicated int32x4_t intrinsics: absolute min/max (vmina*/vmaxa*,
   which return/accumulate unsigned results from signed inputs),
   comparisons returning an mve_pred16_t mask, register-shift and
   saturating/rounding shift variants, predicate-merged unary ops
   (neg/abs/mvn/clz/cls/rev64/dup with an __inactive merge vector),
   and predicated dot-product / add-across reductions.  All forward
   to the corresponding signed V4SI-mode builtins.  */
8297 __extension__ extern __inline uint32_t
8298 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8299 __arm_vminavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
8300 {
8301 return __builtin_mve_vminavq_p_sv4si (__a, __b, __p);
8302 }
8303
8304 __extension__ extern __inline uint32x4_t
8305 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8306 __arm_vminaq_m_s32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8307 {
8308 return __builtin_mve_vminaq_m_sv4si (__a, __b, __p);
8309 }
8310
8311 __extension__ extern __inline uint32_t
8312 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8313 __arm_vmaxavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
8314 {
8315 return __builtin_mve_vmaxavq_p_sv4si (__a, __b, __p);
8316 }
8317
8318 __extension__ extern __inline uint32x4_t
8319 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8320 __arm_vmaxaq_m_s32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8321 {
8322 return __builtin_mve_vmaxaq_m_sv4si (__a, __b, __p);
8323 }
8324
8325 __extension__ extern __inline mve_pred16_t
8326 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8327 __arm_vcmpneq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8328 {
8329 return __builtin_mve_vcmpneq_m_sv4si (__a, __b, __p);
8330 }
8331
8332 __extension__ extern __inline mve_pred16_t
8333 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8334 __arm_vcmpneq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8335 {
8336 return __builtin_mve_vcmpneq_m_n_sv4si (__a, __b, __p);
8337 }
8338
8339 __extension__ extern __inline mve_pred16_t
8340 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8341 __arm_vcmpltq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8342 {
8343 return __builtin_mve_vcmpltq_m_sv4si (__a, __b, __p);
8344 }
8345
8346 __extension__ extern __inline mve_pred16_t
8347 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8348 __arm_vcmpltq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8349 {
8350 return __builtin_mve_vcmpltq_m_n_sv4si (__a, __b, __p);
8351 }
8352
8353 __extension__ extern __inline mve_pred16_t
8354 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8355 __arm_vcmpleq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8356 {
8357 return __builtin_mve_vcmpleq_m_sv4si (__a, __b, __p);
8358 }
8359
8360 __extension__ extern __inline mve_pred16_t
8361 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8362 __arm_vcmpleq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8363 {
8364 return __builtin_mve_vcmpleq_m_n_sv4si (__a, __b, __p);
8365 }
8366
8367 __extension__ extern __inline mve_pred16_t
8368 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8369 __arm_vcmpgtq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8370 {
8371 return __builtin_mve_vcmpgtq_m_sv4si (__a, __b, __p);
8372 }
8373
8374 __extension__ extern __inline mve_pred16_t
8375 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8376 __arm_vcmpgtq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8377 {
8378 return __builtin_mve_vcmpgtq_m_n_sv4si (__a, __b, __p);
8379 }
8380
8381 __extension__ extern __inline mve_pred16_t
8382 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8383 __arm_vcmpgeq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8384 {
8385 return __builtin_mve_vcmpgeq_m_sv4si (__a, __b, __p);
8386 }
8387
8388 __extension__ extern __inline mve_pred16_t
8389 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8390 __arm_vcmpgeq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8391 {
8392 return __builtin_mve_vcmpgeq_m_n_sv4si (__a, __b, __p);
8393 }
8394
8395 __extension__ extern __inline mve_pred16_t
8396 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8397 __arm_vcmpeqq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8398 {
8399 return __builtin_mve_vcmpeqq_m_sv4si (__a, __b, __p);
8400 }
8401
8402 __extension__ extern __inline mve_pred16_t
8403 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8404 __arm_vcmpeqq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8405 {
8406 return __builtin_mve_vcmpeqq_m_n_sv4si (__a, __b, __p);
8407 }
8408
8409 __extension__ extern __inline int32x4_t
8410 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8411 __arm_vshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8412 {
8413 return __builtin_mve_vshlq_m_r_sv4si (__a, __b, __p);
8414 }
8415
8416 __extension__ extern __inline int32x4_t
8417 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8418 __arm_vrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8419 {
8420 return __builtin_mve_vrshlq_m_n_sv4si (__a, __b, __p);
8421 }
8422
8423 __extension__ extern __inline int32x4_t
8424 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8425 __arm_vrev64q_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8426 {
8427 return __builtin_mve_vrev64q_m_sv4si (__inactive, __a, __p);
8428 }
8429
8430 __extension__ extern __inline int32x4_t
8431 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8432 __arm_vqshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8433 {
8434 return __builtin_mve_vqshlq_m_r_sv4si (__a, __b, __p);
8435 }
8436
8437 __extension__ extern __inline int32x4_t
8438 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8439 __arm_vqrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
8440 {
8441 return __builtin_mve_vqrshlq_m_n_sv4si (__a, __b, __p);
8442 }
8443
8444 __extension__ extern __inline int32x4_t
8445 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8446 __arm_vqnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8447 {
8448 return __builtin_mve_vqnegq_m_sv4si (__inactive, __a, __p);
8449 }
8450
8451 __extension__ extern __inline int32x4_t
8452 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8453 __arm_vqabsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8454 {
8455 return __builtin_mve_vqabsq_m_sv4si (__inactive, __a, __p);
8456 }
8457
8458 __extension__ extern __inline int32x4_t
8459 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8460 __arm_vnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8461 {
8462 return __builtin_mve_vnegq_m_sv4si (__inactive, __a, __p);
8463 }
8464
8465 __extension__ extern __inline int32x4_t
8466 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8467 __arm_vmvnq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8468 {
8469 return __builtin_mve_vmvnq_m_sv4si (__inactive, __a, __p);
8470 }
8471
8472 __extension__ extern __inline int32_t
8473 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8474 __arm_vmlsdavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8475 {
8476 return __builtin_mve_vmlsdavxq_p_sv4si (__a, __b, __p);
8477 }
8478
8479 __extension__ extern __inline int32_t
8480 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8481 __arm_vmlsdavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8482 {
8483 return __builtin_mve_vmlsdavq_p_sv4si (__a, __b, __p);
8484 }
8485
8486 __extension__ extern __inline int32_t
8487 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8488 __arm_vmladavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8489 {
8490 return __builtin_mve_vmladavxq_p_sv4si (__a, __b, __p);
8491 }
8492
8493 __extension__ extern __inline int32_t
8494 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8495 __arm_vmladavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8496 {
8497 return __builtin_mve_vmladavq_p_sv4si (__a, __b, __p);
8498 }
8499
8500 __extension__ extern __inline int32_t
8501 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8502 __arm_vminvq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p)
8503 {
8504 return __builtin_mve_vminvq_p_sv4si (__a, __b, __p);
8505 }
8506
8507 __extension__ extern __inline int32_t
8508 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8509 __arm_vmaxvq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p)
8510 {
8511 return __builtin_mve_vmaxvq_p_sv4si (__a, __b, __p);
8512 }
8513
8514 __extension__ extern __inline int32x4_t
8515 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8516 __arm_vdupq_m_n_s32 (int32x4_t __inactive, int32_t __a, mve_pred16_t __p)
8517 {
8518 return __builtin_mve_vdupq_m_n_sv4si (__inactive, __a, __p);
8519 }
8520
8521 __extension__ extern __inline int32x4_t
8522 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8523 __arm_vclzq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8524 {
8525 return __builtin_mve_vclzq_m_sv4si (__inactive, __a, __p);
8526 }
8527
8528 __extension__ extern __inline int32x4_t
8529 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8530 __arm_vclsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8531 {
8532 return __builtin_mve_vclsq_m_sv4si (__inactive, __a, __p);
8533 }
8534
8535 __extension__ extern __inline int32_t
8536 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8537 __arm_vaddvaq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p)
8538 {
8539 return __builtin_mve_vaddvaq_p_sv4si (__a, __b, __p);
8540 }
8541
8542 __extension__ extern __inline int32x4_t
8543 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8544 __arm_vabsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
8545 {
8546 return __builtin_mve_vabsq_m_sv4si (__inactive, __a, __p);
8547 }
8548
/* int32x4_t counterparts of the s16 group above: non-predicated
   saturating (rounding) doubling multiply accumulate/subtract,
   scalar multiply accumulate, dot-product accumulate into int32_t,
   and shift-and-insert with immediate.  Forward to V4SI builtins.  */
8549 __extension__ extern __inline int32x4_t
8550 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8551 __arm_vqrdmlsdhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8552 {
8553 return __builtin_mve_vqrdmlsdhxq_sv4si (__inactive, __a, __b);
8554 }
8555
8556 __extension__ extern __inline int32x4_t
8557 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8558 __arm_vqrdmlsdhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8559 {
8560 return __builtin_mve_vqrdmlsdhq_sv4si (__inactive, __a, __b);
8561 }
8562
8563 __extension__ extern __inline int32x4_t
8564 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8565 __arm_vqrdmlashq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
8566 {
8567 return __builtin_mve_vqrdmlashq_n_sv4si (__a, __b, __c);
8568 }
8569
8570 __extension__ extern __inline int32x4_t
8571 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8572 __arm_vqdmlashq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
8573 {
8574 return __builtin_mve_vqdmlashq_n_sv4si (__a, __b, __c);
8575 }
8576
8577 __extension__ extern __inline int32x4_t
8578 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8579 __arm_vqrdmlahq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
8580 {
8581 return __builtin_mve_vqrdmlahq_n_sv4si (__a, __b, __c);
8582 }
8583
8584 __extension__ extern __inline int32x4_t
8585 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8586 __arm_vqrdmladhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8587 {
8588 return __builtin_mve_vqrdmladhxq_sv4si (__inactive, __a, __b);
8589 }
8590
8591 __extension__ extern __inline int32x4_t
8592 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8593 __arm_vqrdmladhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8594 {
8595 return __builtin_mve_vqrdmladhq_sv4si (__inactive, __a, __b);
8596 }
8597
8598 __extension__ extern __inline int32x4_t
8599 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8600 __arm_vqdmlsdhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8601 {
8602 return __builtin_mve_vqdmlsdhxq_sv4si (__inactive, __a, __b);
8603 }
8604
8605 __extension__ extern __inline int32x4_t
8606 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8607 __arm_vqdmlsdhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8608 {
8609 return __builtin_mve_vqdmlsdhq_sv4si (__inactive, __a, __b);
8610 }
8611
8612 __extension__ extern __inline int32x4_t
8613 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8614 __arm_vqdmlahq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
8615 {
8616 return __builtin_mve_vqdmlahq_n_sv4si (__a, __b, __c);
8617 }
8618
8619 __extension__ extern __inline int32x4_t
8620 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8621 __arm_vqdmladhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8622 {
8623 return __builtin_mve_vqdmladhxq_sv4si (__inactive, __a, __b);
8624 }
8625
8626 __extension__ extern __inline int32x4_t
8627 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8628 __arm_vqdmladhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
8629 {
8630 return __builtin_mve_vqdmladhq_sv4si (__inactive, __a, __b);
8631 }
8632
8633 __extension__ extern __inline int32_t
8634 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8635 __arm_vmlsdavaxq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c)
8636 {
8637 return __builtin_mve_vmlsdavaxq_sv4si (__a, __b, __c);
8638 }
8639
8640 __extension__ extern __inline int32_t
8641 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8642 __arm_vmlsdavaq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c)
8643 {
8644 return __builtin_mve_vmlsdavaq_sv4si (__a, __b, __c);
8645 }
8646
8647 __extension__ extern __inline int32x4_t
8648 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8649 __arm_vmlasq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
8650 {
8651 return __builtin_mve_vmlasq_n_sv4si (__a, __b, __c);
8652 }
8653
8654 __extension__ extern __inline int32x4_t
8655 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8656 __arm_vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
8657 {
8658 return __builtin_mve_vmlaq_n_sv4si (__a, __b, __c);
8659 }
8660
8661 __extension__ extern __inline int32_t
8662 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8663 __arm_vmladavaxq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c)
8664 {
8665 return __builtin_mve_vmladavaxq_sv4si (__a, __b, __c);
8666 }
8667
8668 __extension__ extern __inline int32_t
8669 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8670 __arm_vmladavaq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c)
8671 {
8672 return __builtin_mve_vmladavaq_sv4si (__a, __b, __c);
8673 }
8674
8675 __extension__ extern __inline int32x4_t
8676 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8677 __arm_vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm)
8678 {
8679 return __builtin_mve_vsriq_n_sv4si (__a, __b, __imm);
8680 }
8681
8682 __extension__ extern __inline int32x4_t
8683 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8684 __arm_vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm)
8685 {
8686 return __builtin_mve_vsliq_n_sv4si (__a, __b, __imm);
8687 }
8688
/* 64-bit-lane predicate select (V2DI mode), 32-bit widening
   (rounding) long multiply accumulate intrinsics producing 64-bit
   scalar accumulators (vrmlaldavh*/vrmlsldavh*, plus their _p
   predicated variants), predicated long add-across (vaddlvaq_p),
   and predicate-merged 16-bit reversal on 8-bit lanes (vrev16q_m).  */
8689 __extension__ extern __inline uint64x2_t
8690 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8691 __arm_vpselq_u64 (uint64x2_t __a, uint64x2_t __b, mve_pred16_t __p)
8692 {
8693 return __builtin_mve_vpselq_uv2di (__a, __b, __p);
8694 }
8695
8696 __extension__ extern __inline int64x2_t
8697 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8698 __arm_vpselq_s64 (int64x2_t __a, int64x2_t __b, mve_pred16_t __p)
8699 {
8700 return __builtin_mve_vpselq_sv2di (__a, __b, __p);
8701 }
8702
8703 __extension__ extern __inline int64_t
8704 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8705 __arm_vrmlaldavhaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
8706 {
8707 return __builtin_mve_vrmlaldavhaxq_sv4si (__a, __b, __c);
8708 }
8709
8710 __extension__ extern __inline int64_t
8711 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8712 __arm_vrmlsldavhaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
8713 {
8714 return __builtin_mve_vrmlsldavhaq_sv4si (__a, __b, __c);
8715 }
8716
8717 __extension__ extern __inline int64_t
8718 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8719 __arm_vrmlsldavhaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
8720 {
8721 return __builtin_mve_vrmlsldavhaxq_sv4si (__a, __b, __c);
8722 }
8723
8724 __extension__ extern __inline int64_t
8725 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8726 __arm_vaddlvaq_p_s32 (int64_t __a, int32x4_t __b, mve_pred16_t __p)
8727 {
8728 return __builtin_mve_vaddlvaq_p_sv4si (__a, __b, __p);
8729 }
8730
8731 __extension__ extern __inline int8x16_t
8732 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8733 __arm_vrev16q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
8734 {
8735 return __builtin_mve_vrev16q_m_sv16qi (__inactive, __a, __p);
8736 }
8737
8738 __extension__ extern __inline int64_t
8739 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8740 __arm_vrmlaldavhq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8741 {
8742 return __builtin_mve_vrmlaldavhq_p_sv4si (__a, __b, __p);
8743 }
8744
8745 __extension__ extern __inline int64_t
8746 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8747 __arm_vrmlaldavhxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8748 {
8749 return __builtin_mve_vrmlaldavhxq_p_sv4si (__a, __b, __p);
8750 }
8751
8752 __extension__ extern __inline int64_t
8753 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8754 __arm_vrmlsldavhq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8755 {
8756 return __builtin_mve_vrmlsldavhq_p_sv4si (__a, __b, __p);
8757 }
8758
8759 __extension__ extern __inline int64_t
8760 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8761 __arm_vrmlsldavhxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
8762 {
8763 return __builtin_mve_vrmlsldavhxq_p_sv4si (__a, __b, __p);
8764 }
8765
8766 __extension__ extern __inline uint64_t
8767 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8768 __arm_vaddlvaq_p_u32 (uint64_t __a, uint32x4_t __b, mve_pred16_t __p)
8769 {
8770 return __builtin_mve_vaddlvaq_p_uv4si (__a, __b, __p);
8771 }
8772
8773 __extension__ extern __inline uint8x16_t
8774 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8775 __arm_vrev16q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
8776 {
8777 return __builtin_mve_vrev16q_m_uv16qi (__inactive, __a, __p);
8778 }
8779
8780 __extension__ extern __inline uint64_t
8781 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8782 __arm_vrmlaldavhq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
8783 {
8784 return __builtin_mve_vrmlaldavhq_p_uv4si (__a, __b, __p);
8785 }
8786
/* int16x8_t intrinsics: predicate-merged mvn/orr with an immediate
   operand, narrowing shift-right intrinsics packing int16x8_t lanes
   into the bottom (b) or top (t) halves of an int8x16_t (plain,
   rounding and/or saturating variants, per the vq/vr prefixes in the
   names), and long multiply-accumulate reductions into an int64_t
   accumulator (vmlaldava*/vmlsldava* and predicated vmlaldavq_p).  */
8787 __extension__ extern __inline int16x8_t
8788 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8789 __arm_vmvnq_m_n_s16 (int16x8_t __inactive, const int __imm, mve_pred16_t __p)
8790 {
8791 return __builtin_mve_vmvnq_m_n_sv8hi (__inactive, __imm, __p);
8792 }
8793
8794 __extension__ extern __inline int16x8_t
8795 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8796 __arm_vorrq_m_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
8797 {
8798 return __builtin_mve_vorrq_m_n_sv8hi (__a, __imm, __p);
8799 }
8800
8801 __extension__ extern __inline int8x16_t
8802 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8803 __arm_vqrshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
8804 {
8805 return __builtin_mve_vqrshrntq_n_sv8hi (__a, __b, __imm);
8806 }
8807
8808 __extension__ extern __inline int8x16_t
8809 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8810 __arm_vqshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
8811 {
8812 return __builtin_mve_vqshrnbq_n_sv8hi (__a, __b, __imm);
8813 }
8814
8815 __extension__ extern __inline int8x16_t
8816 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8817 __arm_vqshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
8818 {
8819 return __builtin_mve_vqshrntq_n_sv8hi (__a, __b, __imm);
8820 }
8821
8822 __extension__ extern __inline int8x16_t
8823 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8824 __arm_vrshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
8825 {
8826 return __builtin_mve_vrshrnbq_n_sv8hi (__a, __b, __imm);
8827 }
8828
8829 __extension__ extern __inline int8x16_t
8830 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8831 __arm_vrshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
8832 {
8833 return __builtin_mve_vrshrntq_n_sv8hi (__a, __b, __imm);
8834 }
8835
8836 __extension__ extern __inline int8x16_t
8837 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8838 __arm_vshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
8839 {
8840 return __builtin_mve_vshrnbq_n_sv8hi (__a, __b, __imm);
8841 }
8842
8843 __extension__ extern __inline int8x16_t
8844 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8845 __arm_vshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
8846 {
8847 return __builtin_mve_vshrntq_n_sv8hi (__a, __b, __imm);
8848 }
8849
8850 __extension__ extern __inline int64_t
8851 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8852 __arm_vmlaldavaq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c)
8853 {
8854 return __builtin_mve_vmlaldavaq_sv8hi (__a, __b, __c);
8855 }
8856
8857 __extension__ extern __inline int64_t
8858 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8859 __arm_vmlaldavaxq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c)
8860 {
8861 return __builtin_mve_vmlaldavaxq_sv8hi (__a, __b, __c);
8862 }
8863
8864 __extension__ extern __inline int64_t
8865 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8866 __arm_vmlsldavaq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c)
8867 {
8868 return __builtin_mve_vmlsldavaq_sv8hi (__a, __b, __c);
8869 }
8870
8871 __extension__ extern __inline int64_t
8872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8873 __arm_vmlsldavaxq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c)
8874 {
8875 return __builtin_mve_vmlsldavaxq_sv8hi (__a, __b, __c);
8876 }
8877
8878 __extension__ extern __inline int64_t
8879 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8880 __arm_vmlaldavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
8881 {
8882 return __builtin_mve_vmlaldavq_p_sv8hi (__a, __b, __p);
8883 }
8884
8885 __extension__ extern __inline int64_t
8886 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8887 __arm_vmlaldavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
8888 {
8889 return __builtin_mve_vmlaldavxq_p_sv8hi (__a, __b, __p);
8890 }
8891
8892 __extension__ extern __inline int64_t
8893 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8894 __arm_vmlsldavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
8895 {
8896 return __builtin_mve_vmlsldavq_p_sv8hi (__a, __b, __p);
8897 }
8898
8899 __extension__ extern __inline int64_t
8900 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8901 __arm_vmlsldavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
8902 {
8903 return __builtin_mve_vmlsldavxq_p_sv8hi (__a, __b, __p);
8904 }
8905
/* Predicated lane-width-changing moves between int8x16_t and int16x8_t,
   plus predicated rev32.  The _m forms carry an __inactive vector (or a
   destination vector __a) alongside the predicate __p; each wrapper simply
   forwards to the identically-named __builtin_mve_* builtin.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_m_s8 (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_sv16qi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_m_s8 (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_sv16qi (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovnbq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovntq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovnbq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovntq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_sv16qi (__inactive, __a, __p);
}
8954
/* uint16x8_t immediate ops, unsigned-result narrows from signed 16-bit
   sources (the *_un*/u-suffixed _s16 wrappers map to _sv8hi builtins but
   return uint8x16_t), and uint16x8_t -> uint8x16_t narrowing shifts.
   All are direct forwards to the corresponding builtin.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_n_u16 (uint16x8_t __inactive, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_n_uv8hi (__inactive, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_n_uv8hi (__a, __imm, __p);
}

/* Signed-source, unsigned-result narrowing shifts/moves (sv8hi builtins
   returning uint8x16_t).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshruntq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vqrshruntq_n_sv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrunbq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vqshrunbq_n_sv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshruntq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vqshruntq_n_sv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovunbq_m_s16 (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovunbq_m_sv8hi (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovuntq_m_s16 (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovuntq_m_sv8hi (__a, __b, __p);
}

/* Unsigned 16-bit -> unsigned 8-bit narrowing shifts by immediate.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vqrshrntq_n_uv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vqshrnbq_n_uv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vqshrntq_n_uv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vrshrnbq_n_uv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vrshrntq_n_uv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vshrnbq_n_uv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vshrntq_n_uv8hi (__a, __b, __imm);
}
9052
/* Unsigned 16-bit long MAC reductions (scalar uint64_t result) and
   predicated u8/u16 width-changing moves plus rev32; straight builtin
   forwards as above.  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __builtin_mve_vmlaldavaq_uv8hi (__a, __b, __c);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq_p_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavq_p_uv8hi (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_m_u8 (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_uv16qi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_m_u8 (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_uv16qi (__inactive, __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovnbq_m_uv8hi (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovntq_m_uv8hi (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovnbq_m_uv8hi (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovntq_m_uv8hi (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_uv16qi (__inactive, __a, __p);
}
9115
/* 32-bit signed analogues of the s16 group above: predicated immediate
   ops on int32x4_t, int32x4_t -> int16x8_t narrowing shifts by immediate,
   and long MAC reductions to scalar int64_t (builtin mode suffix v4si).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_n_s32 (int32x4_t __inactive, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_n_sv4si (__inactive, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_n_sv4si (__a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshrntq_n_sv4si (__a, __b, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqshrnbq_n_sv4si (__a, __b, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqshrntq_n_sv4si (__a, __b, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vrshrnbq_n_sv4si (__a, __b, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vrshrntq_n_sv4si (__a, __b, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vshrnbq_n_sv4si (__a, __b, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vshrntq_n_sv4si (__a, __b, __imm);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_mve_vmlaldavaq_sv4si (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_mve_vmlaldavaxq_sv4si (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_mve_vmlsldavaq_sv4si (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_mve_vmlsldavaxq_sv4si (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavq_p_sv4si (__a, __b, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavxq_p_sv4si (__a, __b, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsldavq_p_sv4si (__a, __b, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlsldavxq_p_sv4si (__a, __b, __p);
}
9234
/* Predicated width-changing moves between int16x8_t and int32x4_t, plus
   predicated rev32 on int16x8_t; direct builtin forwards.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_m_s16 (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_m_s16 (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovnbq_m_sv4si (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovntq_m_sv4si (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovnbq_m_sv4si (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovntq_m_sv4si (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_sv8hi (__inactive, __a, __p);
}
9283
/* 32-bit unsigned group: predicated immediate ops on uint32x4_t,
   unsigned-result narrows from int32x4_t sources (sv4si builtins with
   uint16x8_t results), uint32x4_t -> uint16x8_t narrowing shifts, and
   unsigned long MAC reductions; all direct builtin forwards.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m_n_u32 (uint32x4_t __inactive, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_n_uv4si (__inactive, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_n_uv4si (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshruntq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshruntq_n_sv4si (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrunbq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqshrunbq_n_sv4si (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshruntq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqshruntq_n_sv4si (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovunbq_m_s32 (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovunbq_m_sv4si (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovuntq_m_s32 (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovuntq_m_sv4si (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshrntq_n_uv4si (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vqshrnbq_n_uv4si (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vqshrntq_n_uv4si (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vrshrnbq_n_uv4si (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vrshrntq_n_uv4si (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vshrnbq_n_uv4si (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vshrntq_n_uv4si (__a, __b, __imm);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __builtin_mve_vmlaldavaq_uv4si (__a, __b, __c);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmlaldavq_p_uv4si (__a, __b, __p);
}
9395
/* Predicated width-changing moves between uint16x8_t and uint32x4_t, plus
   predicated rev32 on uint16x8_t; direct builtin forwards.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_m_u16 (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_m_u16 (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovnbq_m_uv4si (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmovntq_m_uv4si (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovnbq_m_uv4si (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqmovntq_m_uv4si (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_uv8hi (__inactive, __a, __p);
}
9444
/* Predicated vsriq/vsubq/vqshluq/vabavq/vshlq wrappers for every element
   type (s8/u8/s16/u16/s32/u32).  vabavq_p accumulates into a scalar
   uint32_t; vqshluq_m takes a signed source and an unsigned inactive/result
   vector; vshlq_m's shift-amount vector __b is always the signed type of
   matching width.  All are direct forwards to the builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_m_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsriq_m_n_sv16qi (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq_m_n_s8 (uint8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshluq_m_n_sv16qi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_p_s8 (uint32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vabavq_p_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsriq_m_n_uv16qi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_p_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vabavq_p_uv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_m_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsriq_m_n_sv8hi (__a, __b, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq_m_n_s16 (uint16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshluq_m_n_sv8hi (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_p_s16 (uint32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vabavq_p_sv8hi (__a, __b, __c, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsriq_m_n_uv8hi (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_uv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_uv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_p_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vabavq_p_uv8hi (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_m_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsriq_m_n_sv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq_m_n_s32 (uint32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vqshluq_m_n_sv4si (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_p_s32 (uint32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vabavq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vsriq_m_n_uv4si (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_p_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vabavq_p_uv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_sv4si (__inactive, __a, __b, __p);
}
9633
/* Predicated vabdq wrappers for all six element types; each forwards its
   __inactive vector, two operands and the predicate straight to the
   corresponding builtin.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_uv8hi (__inactive, __a, __b, __p);
}
9675
/* Predicated vector + scalar add (VADD, "_n" scalar-operand form).
   NOTE(review): the scalar __b is declared plain `int` for every element
   type, including the unsigned variants — unlike the vhaddq_m_n_* family
   below, which uses width-matched scalar types.  This matches the
   declared ACLE signatures for these intrinsics; verify before changing.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, int __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, int __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, int __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_uv8hi (__inactive, __a, __b, __p);
}
9717
/* Predicated vector + vector add (VADD) for all six integer element
   types; inactive lanes are taken from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_uv8hi (__inactive, __a, __b, __p);
}
9759
/* Predicated bitwise AND (VAND) for all six integer element types;
   inactive lanes are taken from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_uv8hi (__inactive, __a, __b, __p);
}
9801
/* Predicated bit-clear, i.e. __a AND NOT __b (VBIC), for all six integer
   element types; inactive lanes are taken from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_uv8hi (__inactive, __a, __b, __p);
}
9843
/* Predicated bit-reverse-and-shift-right (VBRSR) with a 32-bit scalar
   operand __b, for all six integer element types; inactive lanes are
   taken from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_uv8hi (__inactive, __a, __b, __p);
}
9885
/* Predicated complex add with 270-degree rotation (VCADD #270) for all
   six integer element types; inactive lanes are taken from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv8hi (__inactive, __a, __b, __p);
}
9927
/* Predicated complex add with 90-degree rotation (VCADD #90) for all six
   integer element types; inactive lanes are taken from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_uv8hi (__inactive, __a, __b, __p);
}
9969
/* Predicated bitwise exclusive-OR (VEOR) for all six integer element
   types; inactive lanes are taken from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_uv8hi (__inactive, __a, __b, __p);
}
10011
/* Predicated halving add with a scalar operand (VHADD, "_n" form).  The
   scalar __b is width-matched to the element type, unlike the
   vaddq_m_n_* wrappers above.  Inactive lanes come from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv8hi (__inactive, __a, __b, __p);
}
10053
/* Predicated halving vector add (VHADD) for all six integer element
   types; inactive lanes are taken from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv8hi (__inactive, __a, __b, __p);
}
10095
/* Predicated halving complex add, 270-degree rotation (VHCADD #270).
   Signed element types only — the instruction has no unsigned form.
   Inactive lanes are taken from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot270_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot270_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot270_m_sv8hi (__inactive, __a, __b, __p);
}
10116
/* Predicated halving complex add, 90-degree rotation (VHCADD #90).
   Signed element types only — the instruction has no unsigned form.
   Inactive lanes are taken from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv8hi (__inactive, __a, __b, __p);
}
10137
/* Predicated halving subtract with a width-matched scalar operand
   (VHSUB, "_n" form); inactive lanes are taken from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv8hi (__inactive, __a, __b, __p);
}
10179
/* Predicated halving vector subtract (VHSUB) for all six integer
   element types; inactive lanes are taken from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv8hi (__inactive, __a, __b, __p);
}
10221
/* Predicated element-wise maximum (VMAX) for all six integer element
   types; inactive lanes are taken from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_uv8hi (__inactive, __a, __b, __p);
}
10263
/* Predicated element-wise minimum (VMIN) for all six integer element
   types; inactive lanes are taken from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_uv8hi (__inactive, __a, __b, __p);
}
10305
/* Predicated multiply-accumulate across vector with scalar addend
   (VMLADAVA, "_p" predicated form): accumulates lane products of __b
   and __c into the 32-bit scalar __a for predicate-enabled lanes.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaq_p_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaq_p_sv8hi (__a, __b, __c, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaq_p_uv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaq_p_uv4si (__a, __b, __c, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaq_p_uv8hi (__a, __b, __c, __p);
}
10347
/* Predicated exchanged multiply-accumulate across vector (VMLADAVAX,
   "_p" form).  Signed element types only — the exchanged variant has no
   unsigned form.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaxq_p_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaxq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmladavaxq_p_sv8hi (__a, __b, __c, __p);
}
10368
/* vmlaq_m_n (predicated vector multiply-accumulate with a scalar
   operand, per the ACLE spec): one forwarder per element type, each
   mapping directly onto the corresponding GCC builtin.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaq_m_n_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaq_m_n_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaq_m_n_sv8hi (__a, __b, __c, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaq_m_n_uv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaq_m_n_uv4si (__a, __b, __c, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlaq_m_n_uv8hi (__a, __b, __c, __p);
}
10410
/* vmlasq_m_n (predicated multiply-accumulate variant with the scalar
   added after the multiply, per the ACLE spec): forwarders to the
   per-element-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlasq_m_n_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlasq_m_n_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlasq_m_n_sv8hi (__a, __b, __c, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlasq_m_n_uv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlasq_m_n_uv4si (__a, __b, __c, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlasq_m_n_uv8hi (__a, __b, __c, __p);
}
10452
/* vmlsdavaq_p (predicated multiply-subtract across vector into a
   scalar accumulator, per the ACLE spec; signed only): forwarders to
   the GCC builtins.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaq_p_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaq_p_sv8hi (__a, __b, __c, __p);
}
10473
/* vmlsdavaxq_p (exchanged variant of vmlsdavaq_p, per the ACLE spec;
   signed only): forwarders to the GCC builtins.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaxq_p_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaxq_p_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vmlsdavaxq_p_sv8hi (__a, __b, __c, __p);
}
10494
/* vmulhq_m (predicated high-half multiply, per the ACLE spec):
   __inactive supplies the result lanes where the predicate __p is
   false; forwards to the per-element-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv8hi (__inactive, __a, __b, __p);
}
10536
/* vmullbq_int_m (predicated widening multiply of bottom/even lanes,
   per the ACLE spec): note the result vector is the double-width
   type of the operands; forwards to the GCC builtins.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_s8 (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_u8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_u32 (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_m_u16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmullbq_int_m_uv8hi (__inactive, __a, __b, __p);
}
10578
/* vmulltq_int_m (predicated widening multiply of top/odd lanes, per
   the ACLE spec): double-width result type, as for vmullbq_int_m;
   forwards to the GCC builtins.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_s8 (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_u8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_u32 (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_m_u16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv8hi (__inactive, __a, __b, __p);
}
10620
/* vmulq_m_n (predicated vector-by-scalar multiply, per the ACLE
   spec): forwarders to the per-element-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv8hi (__inactive, __a, __b, __p);
}
10662
/* vmulq_m (predicated vector-by-vector multiply, per the ACLE spec):
   forwarders to the per-element-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv8hi (__inactive, __a, __b, __p);
}
10704
/* vornq_m (predicated bitwise OR-NOT, per the ACLE spec): forwarders
   to the per-element-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv8hi (__inactive, __a, __b, __p);
}
10746
/* vorrq_m (predicated bitwise OR, per the ACLE spec): forwarders to
   the per-element-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv8hi (__inactive, __a, __b, __p);
}
10788
/* vqaddq_m_n (predicated saturating add of a scalar, per the ACLE
   spec): forwarders to the per-element-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_n_uv8hi (__inactive, __a, __b, __p);
}
10830
/* vqaddq_m (predicated saturating vector add, per the ACLE spec):
   forwarders to the per-element-type GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_sv8hi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_uv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_uv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqaddq_m_uv8hi (__inactive, __a, __b, __p);
}
10872
/* vqdmladhq_m (predicated saturating doubling multiply-add-dual, per
   the ACLE spec; signed only): forwarders to the GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhq_m_sv8hi (__inactive, __a, __b, __p);
}
10893
/* vqdmladhxq_m (exchanged variant of vqdmladhq_m, per the ACLE spec;
   signed only): forwarders to the GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhxq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhxq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmladhxq_m_sv8hi (__inactive, __a, __b, __p);
}
10914
/* vqdmlahq_m_n (predicated saturating doubling multiply-accumulate
   with scalar, per the ACLE spec; signed only): note these take
   (__a, __b, __c) rather than an __inactive argument — the
   accumulator __a doubles as the destination.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlahq_m_n_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlahq_m_n_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlahq_m_n_sv8hi (__a, __b, __c, __p);
}
10935
/* vqdmlsdhq_m (predicated saturating doubling multiply-subtract-dual,
   per the ACLE spec; signed only): forwarders to the GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhq_m_sv8hi (__inactive, __a, __b, __p);
}
10956
/* vqdmlsdhxq_m (exchanged variant of vqdmlsdhq_m, per the ACLE spec;
   signed only): forwarders to the GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhxq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhxq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmlsdhxq_m_sv8hi (__inactive, __a, __b, __p);
}
10977
/* vqdmulhq_m_n (predicated saturating doubling multiply returning the
   high half, scalar operand, per the ACLE spec; signed only):
   forwarders to the GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_n_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_n_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_n_sv8hi (__inactive, __a, __b, __p);
}
10998
/* vqdmulhq_m (vector-by-vector form of vqdmulhq_m_n, per the ACLE
   spec; signed only): forwarders to the GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqdmulhq_m_sv8hi (__inactive, __a, __b, __p);
}
11019
/* vqrdmladhq_m (rounding variant of vqdmladhq_m, per the ACLE spec;
   signed only): forwarders to the GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhq_m_sv8hi (__inactive, __a, __b, __p);
}
11040
/* vqrdmladhxq_m (rounding, exchanged variant of vqdmladhq_m, per the
   ACLE spec; signed only): forwarders to the GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhxq_m_sv16qi (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhxq_m_sv4si (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmladhxq_m_sv8hi (__inactive, __a, __b, __p);
}
11061
/* vqrdmlahq_m_n (predicated rounding saturating doubling
   multiply-accumulate with scalar, per the ACLE spec; signed only):
   the accumulator __a doubles as the destination, hence no
   __inactive argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlahq_m_n_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlahq_m_n_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlahq_m_n_sv8hi (__a, __b, __c, __p);
}
11082
/* vqrdmlashq_m_n ('vmlas'-style rounding saturating doubling
   multiply-accumulate with scalar, per the ACLE spec; signed only):
   forwarders to the GCC builtins.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlashq_m_n_sv16qi (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlashq_m_n_sv4si (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vqrdmlashq_m_n_sv8hi (__a, __b, __c, __p);
}
11103
/* __arm_vqdmlashq_m_n_* : predicated scalar-operand (_n) wrappers
   forwarding to __builtin_mve_vqdmlashq_m_n_*.  NOTE(review): variants
   are listed s8, s16, s32 here (unlike the s8, s32, s16 ordering of the
   neighbouring families) — harmless, likely generator ordering.  */
11104 __extension__ extern __inline int8x16_t
11105 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11106 __arm_vqdmlashq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
11107 {
11108 return __builtin_mve_vqdmlashq_m_n_sv16qi (__a, __b, __c, __p);
11109 }
11110
11111 __extension__ extern __inline int16x8_t
11112 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11113 __arm_vqdmlashq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
11114 {
11115 return __builtin_mve_vqdmlashq_m_n_sv8hi (__a, __b, __c, __p);
11116 }
11117
11118 __extension__ extern __inline int32x4_t
11119 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11120 __arm_vqdmlashq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
11121 {
11122 return __builtin_mve_vqdmlashq_m_n_sv4si (__a, __b, __c, __p);
11123 }
11124
/* __arm_vqrdmlsdhq_m_* : merging-predicated wrappers (inactive lanes
   sourced from __inactive — TODO confirm vs ACLE) forwarding to
   __builtin_mve_vqrdmlsdhq_m_* per element type.  */
11125 __extension__ extern __inline int8x16_t
11126 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11127 __arm_vqrdmlsdhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11128 {
11129 return __builtin_mve_vqrdmlsdhq_m_sv16qi (__inactive, __a, __b, __p);
11130 }
11131
11132 __extension__ extern __inline int32x4_t
11133 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11134 __arm_vqrdmlsdhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11135 {
11136 return __builtin_mve_vqrdmlsdhq_m_sv4si (__inactive, __a, __b, __p);
11137 }
11138
11139 __extension__ extern __inline int16x8_t
11140 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11141 __arm_vqrdmlsdhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11142 {
11143 return __builtin_mve_vqrdmlsdhq_m_sv8hi (__inactive, __a, __b, __p);
11144 }
11145
/* __arm_vqrdmlsdhxq_m_* : merging-predicated wrappers forwarding to
   __builtin_mve_vqrdmlsdhxq_m_* per element type.  */
11146 __extension__ extern __inline int8x16_t
11147 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11148 __arm_vqrdmlsdhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11149 {
11150 return __builtin_mve_vqrdmlsdhxq_m_sv16qi (__inactive, __a, __b, __p);
11151 }
11152
11153 __extension__ extern __inline int32x4_t
11154 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11155 __arm_vqrdmlsdhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11156 {
11157 return __builtin_mve_vqrdmlsdhxq_m_sv4si (__inactive, __a, __b, __p);
11158 }
11159
11160 __extension__ extern __inline int16x8_t
11161 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11162 __arm_vqrdmlsdhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11163 {
11164 return __builtin_mve_vqrdmlsdhxq_m_sv8hi (__inactive, __a, __b, __p);
11165 }
11166
/* __arm_vqrdmulhq_m_n_* : merging-predicated, scalar-operand (_n)
   wrappers forwarding to __builtin_mve_vqrdmulhq_m_n_* per element
   type.  */
11167 __extension__ extern __inline int8x16_t
11168 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11169 __arm_vqrdmulhq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
11170 {
11171 return __builtin_mve_vqrdmulhq_m_n_sv16qi (__inactive, __a, __b, __p);
11172 }
11173
11174 __extension__ extern __inline int32x4_t
11175 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11176 __arm_vqrdmulhq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
11177 {
11178 return __builtin_mve_vqrdmulhq_m_n_sv4si (__inactive, __a, __b, __p);
11179 }
11180
11181 __extension__ extern __inline int16x8_t
11182 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11183 __arm_vqrdmulhq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
11184 {
11185 return __builtin_mve_vqrdmulhq_m_n_sv8hi (__inactive, __a, __b, __p);
11186 }
11187
/* __arm_vqrdmulhq_m_* : merging-predicated vector-by-vector wrappers
   forwarding to __builtin_mve_vqrdmulhq_m_* per element type.  */
11188 __extension__ extern __inline int8x16_t
11189 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11190 __arm_vqrdmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11191 {
11192 return __builtin_mve_vqrdmulhq_m_sv16qi (__inactive, __a, __b, __p);
11193 }
11194
11195 __extension__ extern __inline int32x4_t
11196 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11197 __arm_vqrdmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11198 {
11199 return __builtin_mve_vqrdmulhq_m_sv4si (__inactive, __a, __b, __p);
11200 }
11201
11202 __extension__ extern __inline int16x8_t
11203 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11204 __arm_vqrdmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11205 {
11206 return __builtin_mve_vqrdmulhq_m_sv8hi (__inactive, __a, __b, __p);
11207 }
11208
/* __arm_vqrshlq_m_* : merging-predicated wrappers forwarding to
   __builtin_mve_vqrshlq_m_{s,u}* per element type.  Note the shift
   vector __b is signed even for the unsigned-data variants, as the
   signatures below show.  */
11209 __extension__ extern __inline int8x16_t
11210 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11211 __arm_vqrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11212 {
11213 return __builtin_mve_vqrshlq_m_sv16qi (__inactive, __a, __b, __p);
11214 }
11215
11216 __extension__ extern __inline int32x4_t
11217 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11218 __arm_vqrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11219 {
11220 return __builtin_mve_vqrshlq_m_sv4si (__inactive, __a, __b, __p);
11221 }
11222
11223 __extension__ extern __inline int16x8_t
11224 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11225 __arm_vqrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11226 {
11227 return __builtin_mve_vqrshlq_m_sv8hi (__inactive, __a, __b, __p);
11228 }
11229
11230 __extension__ extern __inline uint8x16_t
11231 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11232 __arm_vqrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11233 {
11234 return __builtin_mve_vqrshlq_m_uv16qi (__inactive, __a, __b, __p);
11235 }
11236
11237 __extension__ extern __inline uint32x4_t
11238 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11239 __arm_vqrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11240 {
11241 return __builtin_mve_vqrshlq_m_uv4si (__inactive, __a, __b, __p);
11242 }
11243
11244 __extension__ extern __inline uint16x8_t
11245 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11246 __arm_vqrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11247 {
11248 return __builtin_mve_vqrshlq_m_uv8hi (__inactive, __a, __b, __p);
11249 }
11250
/* __arm_vqshlq_m_n_* : merging-predicated immediate-shift (_n) wrappers
   forwarding to __builtin_mve_vqshlq_m_n_{s,u}*.  __imm must be a
   compile-time constant (const int; range-checked by the builtin —
   TODO confirm accepted range per element width).  */
11251 __extension__ extern __inline int8x16_t
11252 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11253 __arm_vqshlq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
11254 {
11255 return __builtin_mve_vqshlq_m_n_sv16qi (__inactive, __a, __imm, __p);
11256 }
11257
11258 __extension__ extern __inline int32x4_t
11259 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11260 __arm_vqshlq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
11261 {
11262 return __builtin_mve_vqshlq_m_n_sv4si (__inactive, __a, __imm, __p);
11263 }
11264
11265 __extension__ extern __inline int16x8_t
11266 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11267 __arm_vqshlq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
11268 {
11269 return __builtin_mve_vqshlq_m_n_sv8hi (__inactive, __a, __imm, __p);
11270 }
11271
11272 __extension__ extern __inline uint8x16_t
11273 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11274 __arm_vqshlq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
11275 {
11276 return __builtin_mve_vqshlq_m_n_uv16qi (__inactive, __a, __imm, __p);
11277 }
11278
11279 __extension__ extern __inline uint32x4_t
11280 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11281 __arm_vqshlq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
11282 {
11283 return __builtin_mve_vqshlq_m_n_uv4si (__inactive, __a, __imm, __p);
11284 }
11285
11286 __extension__ extern __inline uint16x8_t
11287 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11288 __arm_vqshlq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
11289 {
11290 return __builtin_mve_vqshlq_m_n_uv8hi (__inactive, __a, __imm, __p);
11291 }
11292
/* __arm_vqshlq_m_* : merging-predicated vector-shift wrappers
   forwarding to __builtin_mve_vqshlq_m_{s,u}*; as with vqrshlq_m,
   the shift-count vector __b is signed in all variants.  */
11293 __extension__ extern __inline int8x16_t
11294 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11295 __arm_vqshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11296 {
11297 return __builtin_mve_vqshlq_m_sv16qi (__inactive, __a, __b, __p);
11298 }
11299
11300 __extension__ extern __inline int32x4_t
11301 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11302 __arm_vqshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11303 {
11304 return __builtin_mve_vqshlq_m_sv4si (__inactive, __a, __b, __p);
11305 }
11306
11307 __extension__ extern __inline int16x8_t
11308 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11309 __arm_vqshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11310 {
11311 return __builtin_mve_vqshlq_m_sv8hi (__inactive, __a, __b, __p);
11312 }
11313
11314 __extension__ extern __inline uint8x16_t
11315 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11316 __arm_vqshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11317 {
11318 return __builtin_mve_vqshlq_m_uv16qi (__inactive, __a, __b, __p);
11319 }
11320
11321 __extension__ extern __inline uint32x4_t
11322 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11323 __arm_vqshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11324 {
11325 return __builtin_mve_vqshlq_m_uv4si (__inactive, __a, __b, __p);
11326 }
11327
11328 __extension__ extern __inline uint16x8_t
11329 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11330 __arm_vqshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11331 {
11332 return __builtin_mve_vqshlq_m_uv8hi (__inactive, __a, __b, __p);
11333 }
11334
/* __arm_vqsubq_m_n_* : merging-predicated saturating-subtract wrappers
   with a scalar second operand (_n), forwarding to
   __builtin_mve_vqsubq_m_n_{s,u}* per element type.  */
11335 __extension__ extern __inline int8x16_t
11336 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11337 __arm_vqsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
11338 {
11339 return __builtin_mve_vqsubq_m_n_sv16qi (__inactive, __a, __b, __p);
11340 }
11341
11342 __extension__ extern __inline int32x4_t
11343 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11344 __arm_vqsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
11345 {
11346 return __builtin_mve_vqsubq_m_n_sv4si (__inactive, __a, __b, __p);
11347 }
11348
11349 __extension__ extern __inline int16x8_t
11350 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11351 __arm_vqsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
11352 {
11353 return __builtin_mve_vqsubq_m_n_sv8hi (__inactive, __a, __b, __p);
11354 }
11355
11356 __extension__ extern __inline uint8x16_t
11357 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11358 __arm_vqsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
11359 {
11360 return __builtin_mve_vqsubq_m_n_uv16qi (__inactive, __a, __b, __p);
11361 }
11362
11363 __extension__ extern __inline uint32x4_t
11364 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11365 __arm_vqsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
11366 {
11367 return __builtin_mve_vqsubq_m_n_uv4si (__inactive, __a, __b, __p);
11368 }
11369
11370 __extension__ extern __inline uint16x8_t
11371 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11372 __arm_vqsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
11373 {
11374 return __builtin_mve_vqsubq_m_n_uv8hi (__inactive, __a, __b, __p);
11375 }
11376
/* __arm_vqsubq_m_* : merging-predicated saturating-subtract wrappers
   (vector second operand), forwarding to __builtin_mve_vqsubq_m_{s,u}*
   per element type.  */
11377 __extension__ extern __inline int8x16_t
11378 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11379 __arm_vqsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11380 {
11381 return __builtin_mve_vqsubq_m_sv16qi (__inactive, __a, __b, __p);
11382 }
11383
11384 __extension__ extern __inline int32x4_t
11385 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11386 __arm_vqsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11387 {
11388 return __builtin_mve_vqsubq_m_sv4si (__inactive, __a, __b, __p);
11389 }
11390
11391 __extension__ extern __inline int16x8_t
11392 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11393 __arm_vqsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11394 {
11395 return __builtin_mve_vqsubq_m_sv8hi (__inactive, __a, __b, __p);
11396 }
11397
11398 __extension__ extern __inline uint8x16_t
11399 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11400 __arm_vqsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
11401 {
11402 return __builtin_mve_vqsubq_m_uv16qi (__inactive, __a, __b, __p);
11403 }
11404
11405 __extension__ extern __inline uint32x4_t
11406 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11407 __arm_vqsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
11408 {
11409 return __builtin_mve_vqsubq_m_uv4si (__inactive, __a, __b, __p);
11410 }
11411
11412 __extension__ extern __inline uint16x8_t
11413 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11414 __arm_vqsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
11415 {
11416 return __builtin_mve_vqsubq_m_uv8hi (__inactive, __a, __b, __p);
11417 }
11418
/* __arm_vrhaddq_m_* : merging-predicated wrappers forwarding to
   __builtin_mve_vrhaddq_m_{s,u}* per element type.  */
11419 __extension__ extern __inline int8x16_t
11420 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11421 __arm_vrhaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11422 {
11423 return __builtin_mve_vrhaddq_m_sv16qi (__inactive, __a, __b, __p);
11424 }
11425
11426 __extension__ extern __inline int32x4_t
11427 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11428 __arm_vrhaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11429 {
11430 return __builtin_mve_vrhaddq_m_sv4si (__inactive, __a, __b, __p);
11431 }
11432
11433 __extension__ extern __inline int16x8_t
11434 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11435 __arm_vrhaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11436 {
11437 return __builtin_mve_vrhaddq_m_sv8hi (__inactive, __a, __b, __p);
11438 }
11439
11440 __extension__ extern __inline uint8x16_t
11441 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11442 __arm_vrhaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
11443 {
11444 return __builtin_mve_vrhaddq_m_uv16qi (__inactive, __a, __b, __p);
11445 }
11446
11447 __extension__ extern __inline uint32x4_t
11448 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11449 __arm_vrhaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
11450 {
11451 return __builtin_mve_vrhaddq_m_uv4si (__inactive, __a, __b, __p);
11452 }
11453
11454 __extension__ extern __inline uint16x8_t
11455 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11456 __arm_vrhaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
11457 {
11458 return __builtin_mve_vrhaddq_m_uv8hi (__inactive, __a, __b, __p);
11459 }
11460
/* __arm_vrmulhq_m_* : merging-predicated wrappers forwarding to
   __builtin_mve_vrmulhq_m_{s,u}* per element type.  */
11461 __extension__ extern __inline int8x16_t
11462 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11463 __arm_vrmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11464 {
11465 return __builtin_mve_vrmulhq_m_sv16qi (__inactive, __a, __b, __p);
11466 }
11467
11468 __extension__ extern __inline int32x4_t
11469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11470 __arm_vrmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11471 {
11472 return __builtin_mve_vrmulhq_m_sv4si (__inactive, __a, __b, __p);
11473 }
11474
11475 __extension__ extern __inline int16x8_t
11476 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11477 __arm_vrmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11478 {
11479 return __builtin_mve_vrmulhq_m_sv8hi (__inactive, __a, __b, __p);
11480 }
11481
11482 __extension__ extern __inline uint8x16_t
11483 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11484 __arm_vrmulhq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
11485 {
11486 return __builtin_mve_vrmulhq_m_uv16qi (__inactive, __a, __b, __p);
11487 }
11488
11489 __extension__ extern __inline uint32x4_t
11490 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11491 __arm_vrmulhq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
11492 {
11493 return __builtin_mve_vrmulhq_m_uv4si (__inactive, __a, __b, __p);
11494 }
11495
11496 __extension__ extern __inline uint16x8_t
11497 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11498 __arm_vrmulhq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
11499 {
11500 return __builtin_mve_vrmulhq_m_uv8hi (__inactive, __a, __b, __p);
11501 }
11502
/* __arm_vrshlq_m_* : merging-predicated wrappers forwarding to
   __builtin_mve_vrshlq_m_{s,u}*; the shift vector __b is signed in
   every variant.  */
11503 __extension__ extern __inline int8x16_t
11504 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11505 __arm_vrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11506 {
11507 return __builtin_mve_vrshlq_m_sv16qi (__inactive, __a, __b, __p);
11508 }
11509
11510 __extension__ extern __inline int32x4_t
11511 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11512 __arm_vrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11513 {
11514 return __builtin_mve_vrshlq_m_sv4si (__inactive, __a, __b, __p);
11515 }
11516
11517 __extension__ extern __inline int16x8_t
11518 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11519 __arm_vrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11520 {
11521 return __builtin_mve_vrshlq_m_sv8hi (__inactive, __a, __b, __p);
11522 }
11523
11524 __extension__ extern __inline uint8x16_t
11525 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11526 __arm_vrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
11527 {
11528 return __builtin_mve_vrshlq_m_uv16qi (__inactive, __a, __b, __p);
11529 }
11530
11531 __extension__ extern __inline uint32x4_t
11532 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11533 __arm_vrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11534 {
11535 return __builtin_mve_vrshlq_m_uv4si (__inactive, __a, __b, __p);
11536 }
11537
11538 __extension__ extern __inline uint16x8_t
11539 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11540 __arm_vrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11541 {
11542 return __builtin_mve_vrshlq_m_uv8hi (__inactive, __a, __b, __p);
11543 }
11544
/* __arm_vrshrq_m_n_* : merging-predicated immediate right-shift (_n)
   wrappers forwarding to __builtin_mve_vrshrq_m_n_{s,u}* per element
   type; __imm must be a compile-time constant.  */
11545 __extension__ extern __inline int8x16_t
11546 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11547 __arm_vrshrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
11548 {
11549 return __builtin_mve_vrshrq_m_n_sv16qi (__inactive, __a, __imm, __p);
11550 }
11551
11552 __extension__ extern __inline int32x4_t
11553 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11554 __arm_vrshrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
11555 {
11556 return __builtin_mve_vrshrq_m_n_sv4si (__inactive, __a, __imm, __p);
11557 }
11558
11559 __extension__ extern __inline int16x8_t
11560 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11561 __arm_vrshrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
11562 {
11563 return __builtin_mve_vrshrq_m_n_sv8hi (__inactive, __a, __imm, __p);
11564 }
11565
11566 __extension__ extern __inline uint8x16_t
11567 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11568 __arm_vrshrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
11569 {
11570 return __builtin_mve_vrshrq_m_n_uv16qi (__inactive, __a, __imm, __p);
11571 }
11572
11573 __extension__ extern __inline uint32x4_t
11574 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11575 __arm_vrshrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
11576 {
11577 return __builtin_mve_vrshrq_m_n_uv4si (__inactive, __a, __imm, __p);
11578 }
11579
11580 __extension__ extern __inline uint16x8_t
11581 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11582 __arm_vrshrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
11583 {
11584 return __builtin_mve_vrshrq_m_n_uv8hi (__inactive, __a, __imm, __p);
11585 }
11586
/* __arm_vshlq_m_n_* : merging-predicated immediate left-shift (_n)
   wrappers forwarding to __builtin_mve_vshlq_m_n_{s,u}* per element
   type; __imm must be a compile-time constant.  */
11587 __extension__ extern __inline int8x16_t
11588 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11589 __arm_vshlq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
11590 {
11591 return __builtin_mve_vshlq_m_n_sv16qi (__inactive, __a, __imm, __p);
11592 }
11593
11594 __extension__ extern __inline int32x4_t
11595 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11596 __arm_vshlq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
11597 {
11598 return __builtin_mve_vshlq_m_n_sv4si (__inactive, __a, __imm, __p);
11599 }
11600
11601 __extension__ extern __inline int16x8_t
11602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11603 __arm_vshlq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
11604 {
11605 return __builtin_mve_vshlq_m_n_sv8hi (__inactive, __a, __imm, __p);
11606 }
11607
11608 __extension__ extern __inline uint8x16_t
11609 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11610 __arm_vshlq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
11611 {
11612 return __builtin_mve_vshlq_m_n_uv16qi (__inactive, __a, __imm, __p);
11613 }
11614
11615 __extension__ extern __inline uint32x4_t
11616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11617 __arm_vshlq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
11618 {
11619 return __builtin_mve_vshlq_m_n_uv4si (__inactive, __a, __imm, __p);
11620 }
11621
11622 __extension__ extern __inline uint16x8_t
11623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11624 __arm_vshlq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
11625 {
11626 return __builtin_mve_vshlq_m_n_uv8hi (__inactive, __a, __imm, __p);
11627 }
11628
/* __arm_vshrq_m_n_* : merging-predicated immediate right-shift (_n)
   wrappers forwarding to __builtin_mve_vshrq_m_n_{s,u}* per element
   type; __imm must be a compile-time constant.  */
11629 __extension__ extern __inline int8x16_t
11630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11631 __arm_vshrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
11632 {
11633 return __builtin_mve_vshrq_m_n_sv16qi (__inactive, __a, __imm, __p);
11634 }
11635
11636 __extension__ extern __inline int32x4_t
11637 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11638 __arm_vshrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
11639 {
11640 return __builtin_mve_vshrq_m_n_sv4si (__inactive, __a, __imm, __p);
11641 }
11642
11643 __extension__ extern __inline int16x8_t
11644 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11645 __arm_vshrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
11646 {
11647 return __builtin_mve_vshrq_m_n_sv8hi (__inactive, __a, __imm, __p);
11648 }
11649
11650 __extension__ extern __inline uint8x16_t
11651 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11652 __arm_vshrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
11653 {
11654 return __builtin_mve_vshrq_m_n_uv16qi (__inactive, __a, __imm, __p);
11655 }
11656
11657 __extension__ extern __inline uint32x4_t
11658 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11659 __arm_vshrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
11660 {
11661 return __builtin_mve_vshrq_m_n_uv4si (__inactive, __a, __imm, __p);
11662 }
11663
11664 __extension__ extern __inline uint16x8_t
11665 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11666 __arm_vshrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
11667 {
11668 return __builtin_mve_vshrq_m_n_uv8hi (__inactive, __a, __imm, __p);
11669 }
11670
/* __arm_vsliq_m_n_* : predicated shift-left-insert (_n) wrappers.
   Note there is no separate __inactive operand: __a is both the
   destination/merge source and first argument, matching the builtin's
   signature below.  Each forwards to __builtin_mve_vsliq_m_n_{s,u}*.  */
11671 __extension__ extern __inline int8x16_t
11672 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11673 __arm_vsliq_m_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
11674 {
11675 return __builtin_mve_vsliq_m_n_sv16qi (__a, __b, __imm, __p);
11676 }
11677
11678 __extension__ extern __inline int32x4_t
11679 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11680 __arm_vsliq_m_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
11681 {
11682 return __builtin_mve_vsliq_m_n_sv4si (__a, __b, __imm, __p);
11683 }
11684
11685 __extension__ extern __inline int16x8_t
11686 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11687 __arm_vsliq_m_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
11688 {
11689 return __builtin_mve_vsliq_m_n_sv8hi (__a, __b, __imm, __p);
11690 }
11691
11692 __extension__ extern __inline uint8x16_t
11693 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11694 __arm_vsliq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
11695 {
11696 return __builtin_mve_vsliq_m_n_uv16qi (__a, __b, __imm, __p);
11697 }
11698
11699 __extension__ extern __inline uint32x4_t
11700 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11701 __arm_vsliq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
11702 {
11703 return __builtin_mve_vsliq_m_n_uv4si (__a, __b, __imm, __p);
11704 }
11705
11706 __extension__ extern __inline uint16x8_t
11707 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11708 __arm_vsliq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
11709 {
11710 return __builtin_mve_vsliq_m_n_uv8hi (__a, __b, __imm, __p);
11711 }
11712
/* __arm_vsubq_m_n_* : merging-predicated subtract wrappers with a
   scalar second operand (_n), forwarding to
   __builtin_mve_vsubq_m_n_{s,u}* per element type.  */
11713 __extension__ extern __inline int8x16_t
11714 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11715 __arm_vsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
11716 {
11717 return __builtin_mve_vsubq_m_n_sv16qi (__inactive, __a, __b, __p);
11718 }
11719
11720 __extension__ extern __inline int32x4_t
11721 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11722 __arm_vsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
11723 {
11724 return __builtin_mve_vsubq_m_n_sv4si (__inactive, __a, __b, __p);
11725 }
11726
11727 __extension__ extern __inline int16x8_t
11728 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11729 __arm_vsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
11730 {
11731 return __builtin_mve_vsubq_m_n_sv8hi (__inactive, __a, __b, __p);
11732 }
11733
11734 __extension__ extern __inline uint8x16_t
11735 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11736 __arm_vsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
11737 {
11738 return __builtin_mve_vsubq_m_n_uv16qi (__inactive, __a, __b, __p);
11739 }
11740
11741 __extension__ extern __inline uint32x4_t
11742 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11743 __arm_vsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
11744 {
11745 return __builtin_mve_vsubq_m_n_uv4si (__inactive, __a, __b, __p);
11746 }
11747
11748 __extension__ extern __inline uint16x8_t
11749 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11750 __arm_vsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
11751 {
11752 return __builtin_mve_vsubq_m_n_uv8hi (__inactive, __a, __b, __p);
11753 }
11754
/* __arm_vmlaldavaq_p_* : predicated (_p) accumulating wrappers
   returning a 64-bit scalar; each forwards its 64-bit accumulator and
   vector operands to __builtin_mve_vmlaldavaq_p_{s,u}* unchanged.  */
11755 __extension__ extern __inline int64_t
11756 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11757 __arm_vmlaldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
11758 {
11759 return __builtin_mve_vmlaldavaq_p_sv4si (__a, __b, __c, __p);
11760 }
11761
11762 __extension__ extern __inline int64_t
11763 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11764 __arm_vmlaldavaq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
11765 {
11766 return __builtin_mve_vmlaldavaq_p_sv8hi (__a, __b, __c, __p);
11767 }
11768
11769 __extension__ extern __inline uint64_t
11770 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11771 __arm_vmlaldavaq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
11772 {
11773 return __builtin_mve_vmlaldavaq_p_uv4si (__a, __b, __c, __p);
11774 }
11775
11776 __extension__ extern __inline uint64_t
11777 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11778 __arm_vmlaldavaq_p_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
11779 {
11780 return __builtin_mve_vmlaldavaq_p_uv8hi (__a, __b, __c, __p);
11781 }
11782
/* __arm_vmlaldavaxq_p_* : predicated accumulating wrappers (signed
   only, per the builtin set) forwarding to
   __builtin_mve_vmlaldavaxq_p_s* with an int64_t accumulator.  */
11783 __extension__ extern __inline int64_t
11784 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11785 __arm_vmlaldavaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
11786 {
11787 return __builtin_mve_vmlaldavaxq_p_sv4si (__a, __b, __c, __p);
11788 }
11789
11790 __extension__ extern __inline int64_t
11791 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11792 __arm_vmlaldavaxq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
11793 {
11794 return __builtin_mve_vmlaldavaxq_p_sv8hi (__a, __b, __c, __p);
11795 }
11796
/* __arm_vmlsldavaq_p_* : predicated accumulating wrappers (signed
   only) forwarding to __builtin_mve_vmlsldavaq_p_s* with an int64_t
   accumulator.  */
11797 __extension__ extern __inline int64_t
11798 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11799 __arm_vmlsldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
11800 {
11801 return __builtin_mve_vmlsldavaq_p_sv4si (__a, __b, __c, __p);
11802 }
11803
11804 __extension__ extern __inline int64_t
11805 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11806 __arm_vmlsldavaq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
11807 {
11808 return __builtin_mve_vmlsldavaq_p_sv8hi (__a, __b, __c, __p);
11809 }
11810
11811 __extension__ extern __inline int64_t
11812 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11813 __arm_vmlsldavaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
11814 {
11815 return __builtin_mve_vmlsldavaxq_p_sv4si (__a, __b, __c, __p);
11816 }
11817
11818 __extension__ extern __inline int64_t
11819 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11820 __arm_vmlsldavaxq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
11821 {
11822 return __builtin_mve_vmlsldavaxq_p_sv8hi (__a, __b, __c, __p);
11823 }
11824
/* Merging-predicated polynomial multiplies (vmullbq_poly_m /
   vmulltq_poly_m): result lanes not selected by __p come from
   __inactive.  p8 inputs widen to 16-bit lanes, p16 to 32-bit.  */
11825 __extension__ extern __inline uint16x8_t
11826 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11827 __arm_vmullbq_poly_m_p8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
11828 {
11829 return __builtin_mve_vmullbq_poly_m_pv16qi (__inactive, __a, __b, __p);
11830 }
11831
11832 __extension__ extern __inline uint32x4_t
11833 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11834 __arm_vmullbq_poly_m_p16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
11835 {
11836 return __builtin_mve_vmullbq_poly_m_pv8hi (__inactive, __a, __b, __p);
11837 }
11838
11839 __extension__ extern __inline uint16x8_t
11840 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11841 __arm_vmulltq_poly_m_p8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
11842 {
11843 return __builtin_mve_vmulltq_poly_m_pv16qi (__inactive, __a, __b, __p);
11844 }
11845
11846 __extension__ extern __inline uint32x4_t
11847 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11848 __arm_vmulltq_poly_m_p16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
11849 {
11850 return __builtin_mve_vmulltq_poly_m_pv8hi (__inactive, __a, __b, __p);
11851 }
11852
/* Merging-predicated saturating doubling long multiplies
   (vqdmullbq_m[_n] / vqdmulltq_m[_n]); the _n variants take a scalar
   second operand.  Inputs widen one lane size (s16->s32, s32->s64).  */
11853 __extension__ extern __inline int64x2_t
11854 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11855 __arm_vqdmullbq_m_n_s32 (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
11856 {
11857 return __builtin_mve_vqdmullbq_m_n_sv4si (__inactive, __a, __b, __p);
11858 }
11859
11860 __extension__ extern __inline int32x4_t
11861 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11862 __arm_vqdmullbq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
11863 {
11864 return __builtin_mve_vqdmullbq_m_n_sv8hi (__inactive, __a, __b, __p);
11865 }
11866
11867 __extension__ extern __inline int64x2_t
11868 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11869 __arm_vqdmullbq_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11870 {
11871 return __builtin_mve_vqdmullbq_m_sv4si (__inactive, __a, __b, __p);
11872 }
11873
11874 __extension__ extern __inline int32x4_t
11875 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11876 __arm_vqdmullbq_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11877 {
11878 return __builtin_mve_vqdmullbq_m_sv8hi (__inactive, __a, __b, __p);
11879 }
11880
11881 __extension__ extern __inline int64x2_t
11882 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11883 __arm_vqdmulltq_m_n_s32 (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
11884 {
11885 return __builtin_mve_vqdmulltq_m_n_sv4si (__inactive, __a, __b, __p);
11886 }
11887
11888 __extension__ extern __inline int32x4_t
11889 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11890 __arm_vqdmulltq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
11891 {
11892 return __builtin_mve_vqdmulltq_m_n_sv8hi (__inactive, __a, __b, __p);
11893 }
11894
11895 __extension__ extern __inline int64x2_t
11896 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11897 __arm_vqdmulltq_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
11898 {
11899 return __builtin_mve_vqdmulltq_m_sv4si (__inactive, __a, __b, __p);
11900 }
11901
11902 __extension__ extern __inline int32x4_t
11903 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11904 __arm_vqdmulltq_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
11905 {
11906 return __builtin_mve_vqdmulltq_m_sv8hi (__inactive, __a, __b, __p);
11907 }
11908
/* Predicated saturating rounding shift-right-narrow intrinsics
   (vqrshrnbq/vqrshrntq and the signed-to-unsigned vqrshrunbq/vqrshruntq
   variants).  __a carries the existing narrow result lanes; __b is
   narrowed by the immediate shift __imm under predicate __p.  */
11909 __extension__ extern __inline int16x8_t
11910 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11911 __arm_vqrshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
11912 {
11913 return __builtin_mve_vqrshrnbq_m_n_sv4si (__a, __b, __imm, __p);
11914 }
11915
11916 __extension__ extern __inline int8x16_t
11917 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11918 __arm_vqrshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
11919 {
11920 return __builtin_mve_vqrshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
11921 }
11922
11923 __extension__ extern __inline uint16x8_t
11924 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11925 __arm_vqrshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
11926 {
11927 return __builtin_mve_vqrshrnbq_m_n_uv4si (__a, __b, __imm, __p);
11928 }
11929
11930 __extension__ extern __inline uint8x16_t
11931 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11932 __arm_vqrshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
11933 {
11934 return __builtin_mve_vqrshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
11935 }
11936
11937 __extension__ extern __inline int16x8_t
11938 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11939 __arm_vqrshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
11940 {
11941 return __builtin_mve_vqrshrntq_m_n_sv4si (__a, __b, __imm, __p);
11942 }
11943
11944 __extension__ extern __inline int8x16_t
11945 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11946 __arm_vqrshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
11947 {
11948 return __builtin_mve_vqrshrntq_m_n_sv8hi (__a, __b, __imm, __p);
11949 }
11950
11951 __extension__ extern __inline uint16x8_t
11952 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11953 __arm_vqrshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
11954 {
11955 return __builtin_mve_vqrshrntq_m_n_uv4si (__a, __b, __imm, __p);
11956 }
11957
11958 __extension__ extern __inline uint8x16_t
11959 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11960 __arm_vqrshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
11961 {
11962 return __builtin_mve_vqrshrntq_m_n_uv8hi (__a, __b, __imm, __p);
11963 }
11964
11965 __extension__ extern __inline uint16x8_t
11966 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11967 __arm_vqrshrunbq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
11968 {
11969 return __builtin_mve_vqrshrunbq_m_n_sv4si (__a, __b, __imm, __p);
11970 }
11971
11972 __extension__ extern __inline uint8x16_t
11973 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11974 __arm_vqrshrunbq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
11975 {
11976 return __builtin_mve_vqrshrunbq_m_n_sv8hi (__a, __b, __imm, __p);
11977 }
11978
11979 __extension__ extern __inline uint16x8_t
11980 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11981 __arm_vqrshruntq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
11982 {
11983 return __builtin_mve_vqrshruntq_m_n_sv4si (__a, __b, __imm, __p);
11984 }
11985
11986 __extension__ extern __inline uint8x16_t
11987 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11988 __arm_vqrshruntq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
11989 {
11990 return __builtin_mve_vqrshruntq_m_n_sv8hi (__a, __b, __imm, __p);
11991 }
11992
/* Predicated saturating (non-rounding) shift-right-narrow intrinsics
   (vqshrnbq/vqshrntq and the signed-to-unsigned vqshrunbq/vqshruntq
   variants) — same shape as the vqrshr* group above but without
   rounding.  */
11993 __extension__ extern __inline int16x8_t
11994 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11995 __arm_vqshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
11996 {
11997 return __builtin_mve_vqshrnbq_m_n_sv4si (__a, __b, __imm, __p);
11998 }
11999
12000 __extension__ extern __inline int8x16_t
12001 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12002 __arm_vqshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12003 {
12004 return __builtin_mve_vqshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
12005 }
12006
12007 __extension__ extern __inline uint16x8_t
12008 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12009 __arm_vqshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
12010 {
12011 return __builtin_mve_vqshrnbq_m_n_uv4si (__a, __b, __imm, __p);
12012 }
12013
12014 __extension__ extern __inline uint8x16_t
12015 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12016 __arm_vqshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
12017 {
12018 return __builtin_mve_vqshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
12019 }
12020
12021 __extension__ extern __inline int16x8_t
12022 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12023 __arm_vqshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
12024 {
12025 return __builtin_mve_vqshrntq_m_n_sv4si (__a, __b, __imm, __p);
12026 }
12027
12028 __extension__ extern __inline int8x16_t
12029 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12030 __arm_vqshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12031 {
12032 return __builtin_mve_vqshrntq_m_n_sv8hi (__a, __b, __imm, __p);
12033 }
12034
12035 __extension__ extern __inline uint16x8_t
12036 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12037 __arm_vqshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
12038 {
12039 return __builtin_mve_vqshrntq_m_n_uv4si (__a, __b, __imm, __p);
12040 }
12041
12042 __extension__ extern __inline uint8x16_t
12043 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12044 __arm_vqshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
12045 {
12046 return __builtin_mve_vqshrntq_m_n_uv8hi (__a, __b, __imm, __p);
12047 }
12048
12049 __extension__ extern __inline uint16x8_t
12050 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12051 __arm_vqshrunbq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
12052 {
12053 return __builtin_mve_vqshrunbq_m_n_sv4si (__a, __b, __imm, __p);
12054 }
12055
12056 __extension__ extern __inline uint8x16_t
12057 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12058 __arm_vqshrunbq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12059 {
12060 return __builtin_mve_vqshrunbq_m_n_sv8hi (__a, __b, __imm, __p);
12061 }
12062
12063 __extension__ extern __inline uint16x8_t
12064 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12065 __arm_vqshruntq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
12066 {
12067 return __builtin_mve_vqshruntq_m_n_sv4si (__a, __b, __imm, __p);
12068 }
12069
12070 __extension__ extern __inline uint8x16_t
12071 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12072 __arm_vqshruntq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12073 {
12074 return __builtin_mve_vqshruntq_m_n_sv8hi (__a, __b, __imm, __p);
12075 }
12076
/* Predicated 32x32 accumulating reductions with 64-bit accumulator
   (vrmlaldavhaq_p and the exchange/subtract vrml{a,s}ldavha[x]q_p
   forms); each forwards directly to its v4si builtin.  */
12077 __extension__ extern __inline int64_t
12078 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12079 __arm_vrmlaldavhaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
12080 {
12081 return __builtin_mve_vrmlaldavhaq_p_sv4si (__a, __b, __c, __p);
12082 }
12083
12084 __extension__ extern __inline uint64_t
12085 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12086 __arm_vrmlaldavhaq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
12087 {
12088 return __builtin_mve_vrmlaldavhaq_p_uv4si (__a, __b, __c, __p);
12089 }
12090
12091 __extension__ extern __inline int64_t
12092 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12093 __arm_vrmlaldavhaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
12094 {
12095 return __builtin_mve_vrmlaldavhaxq_p_sv4si (__a, __b, __c, __p);
12096 }
12097
12098 __extension__ extern __inline int64_t
12099 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12100 __arm_vrmlsldavhaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
12101 {
12102 return __builtin_mve_vrmlsldavhaq_p_sv4si (__a, __b, __c, __p);
12103 }
12104
12105 __extension__ extern __inline int64_t
12106 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12107 __arm_vrmlsldavhaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
12108 {
12109 return __builtin_mve_vrmlsldavhaxq_p_sv4si (__a, __b, __c, __p);
12110 }
12111
/* Predicated rounding shift-right-narrow intrinsics (vrshrnbq/vrshrntq):
   non-saturating counterparts of the vqrshr* group above.  */
12112 __extension__ extern __inline int16x8_t
12113 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12114 __arm_vrshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
12115 {
12116 return __builtin_mve_vrshrnbq_m_n_sv4si (__a, __b, __imm, __p);
12117 }
12118
12119 __extension__ extern __inline int8x16_t
12120 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12121 __arm_vrshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12122 {
12123 return __builtin_mve_vrshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
12124 }
12125
12126 __extension__ extern __inline uint16x8_t
12127 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12128 __arm_vrshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
12129 {
12130 return __builtin_mve_vrshrnbq_m_n_uv4si (__a, __b, __imm, __p);
12131 }
12132
12133 __extension__ extern __inline uint8x16_t
12134 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12135 __arm_vrshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
12136 {
12137 return __builtin_mve_vrshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
12138 }
12139
12140 __extension__ extern __inline int16x8_t
12141 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12142 __arm_vrshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
12143 {
12144 return __builtin_mve_vrshrntq_m_n_sv4si (__a, __b, __imm, __p);
12145 }
12146
12147 __extension__ extern __inline int8x16_t
12148 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12149 __arm_vrshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12150 {
12151 return __builtin_mve_vrshrntq_m_n_sv8hi (__a, __b, __imm, __p);
12152 }
12153
12154 __extension__ extern __inline uint16x8_t
12155 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12156 __arm_vrshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
12157 {
12158 return __builtin_mve_vrshrntq_m_n_uv4si (__a, __b, __imm, __p);
12159 }
12160
12161 __extension__ extern __inline uint8x16_t
12162 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12163 __arm_vrshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
12164 {
12165 return __builtin_mve_vrshrntq_m_n_uv8hi (__a, __b, __imm, __p);
12166 }
12167
/* Merging-predicated shift-left-long intrinsics (vshllbq_m_n /
   vshlltq_m_n): widen __a one lane size while shifting left by __imm;
   unselected lanes come from __inactive.  */
12168 __extension__ extern __inline int16x8_t
12169 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12170 __arm_vshllbq_m_n_s8 (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
12171 {
12172 return __builtin_mve_vshllbq_m_n_sv16qi (__inactive, __a, __imm, __p);
12173 }
12174
12175 __extension__ extern __inline int32x4_t
12176 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12177 __arm_vshllbq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
12178 {
12179 return __builtin_mve_vshllbq_m_n_sv8hi (__inactive, __a, __imm, __p);
12180 }
12181
12182 __extension__ extern __inline uint16x8_t
12183 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12184 __arm_vshllbq_m_n_u8 (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
12185 {
12186 return __builtin_mve_vshllbq_m_n_uv16qi (__inactive, __a, __imm, __p);
12187 }
12188
12189 __extension__ extern __inline uint32x4_t
12190 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12191 __arm_vshllbq_m_n_u16 (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
12192 {
12193 return __builtin_mve_vshllbq_m_n_uv8hi (__inactive, __a, __imm, __p);
12194 }
12195
12196 __extension__ extern __inline int16x8_t
12197 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12198 __arm_vshlltq_m_n_s8 (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
12199 {
12200 return __builtin_mve_vshlltq_m_n_sv16qi (__inactive, __a, __imm, __p);
12201 }
12202
12203 __extension__ extern __inline int32x4_t
12204 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12205 __arm_vshlltq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
12206 {
12207 return __builtin_mve_vshlltq_m_n_sv8hi (__inactive, __a, __imm, __p);
12208 }
12209
12210 __extension__ extern __inline uint16x8_t
12211 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12212 __arm_vshlltq_m_n_u8 (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
12213 {
12214 return __builtin_mve_vshlltq_m_n_uv16qi (__inactive, __a, __imm, __p);
12215 }
12216
12217 __extension__ extern __inline uint32x4_t
12218 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12219 __arm_vshlltq_m_n_u16 (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
12220 {
12221 return __builtin_mve_vshlltq_m_n_uv8hi (__inactive, __a, __imm, __p);
12222 }
12223
/* Predicated plain shift-right-narrow intrinsics (vshrnbq/vshrntq):
   neither rounding nor saturating; same operand shape as the other
   *_m_n narrowing groups above.  */
12224 __extension__ extern __inline int16x8_t
12225 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12226 __arm_vshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
12227 {
12228 return __builtin_mve_vshrnbq_m_n_sv4si (__a, __b, __imm, __p);
12229 }
12230
12231 __extension__ extern __inline int8x16_t
12232 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12233 __arm_vshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12234 {
12235 return __builtin_mve_vshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
12236 }
12237
12238 __extension__ extern __inline uint16x8_t
12239 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12240 __arm_vshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
12241 {
12242 return __builtin_mve_vshrnbq_m_n_uv4si (__a, __b, __imm, __p);
12243 }
12244
12245 __extension__ extern __inline uint8x16_t
12246 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12247 __arm_vshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
12248 {
12249 return __builtin_mve_vshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
12250 }
12251
12252 __extension__ extern __inline int16x8_t
12253 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12254 __arm_vshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
12255 {
12256 return __builtin_mve_vshrntq_m_n_sv4si (__a, __b, __imm, __p);
12257 }
12258
12259 __extension__ extern __inline int8x16_t
12260 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12261 __arm_vshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
12262 {
12263 return __builtin_mve_vshrntq_m_n_sv8hi (__a, __b, __imm, __p);
12264 }
12265
12266 __extension__ extern __inline uint16x8_t
12267 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12268 __arm_vshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
12269 {
12270 return __builtin_mve_vshrntq_m_n_uv4si (__a, __b, __imm, __p);
12271 }
12272
12273 __extension__ extern __inline uint8x16_t
12274 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12275 __arm_vshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
12276 {
12277 return __builtin_mve_vshrntq_m_n_uv8hi (__a, __b, __imm, __p);
12278 }
12279
/* Byte store intrinsics: vstrbq_scatter_offset_* store each (narrowed)
   element at __base[__offset[i]]; vstrbq_* store contiguously at
   __addr.  Pointers are cast to __builtin_neon_qi * to match the
   builtin prototypes.  Wider element types (s16/s32/u16/u32) are
   truncated to bytes by the underlying instruction.  */
12280 __extension__ extern __inline void
12281 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12282 __arm_vstrbq_scatter_offset_s8 (int8_t * __base, uint8x16_t __offset, int8x16_t __value)
12283 {
12284 __builtin_mve_vstrbq_scatter_offset_sv16qi ((__builtin_neon_qi *) __base, __offset, __value);
12285 }
12286
12287 __extension__ extern __inline void
12288 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12289 __arm_vstrbq_scatter_offset_s32 (int8_t * __base, uint32x4_t __offset, int32x4_t __value)
12290 {
12291 __builtin_mve_vstrbq_scatter_offset_sv4si ((__builtin_neon_qi *) __base, __offset, __value);
12292 }
12293
12294 __extension__ extern __inline void
12295 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12296 __arm_vstrbq_scatter_offset_s16 (int8_t * __base, uint16x8_t __offset, int16x8_t __value)
12297 {
12298 __builtin_mve_vstrbq_scatter_offset_sv8hi ((__builtin_neon_qi *) __base, __offset, __value);
12299 }
12300
12301 __extension__ extern __inline void
12302 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12303 __arm_vstrbq_scatter_offset_u8 (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value)
12304 {
12305 __builtin_mve_vstrbq_scatter_offset_uv16qi ((__builtin_neon_qi *) __base, __offset, __value);
12306 }
12307
12308 __extension__ extern __inline void
12309 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12310 __arm_vstrbq_scatter_offset_u32 (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value)
12311 {
12312 __builtin_mve_vstrbq_scatter_offset_uv4si ((__builtin_neon_qi *) __base, __offset, __value);
12313 }
12314
12315 __extension__ extern __inline void
12316 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12317 __arm_vstrbq_scatter_offset_u16 (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value)
12318 {
12319 __builtin_mve_vstrbq_scatter_offset_uv8hi ((__builtin_neon_qi *) __base, __offset, __value);
12320 }
12321
12322 __extension__ extern __inline void
12323 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12324 __arm_vstrbq_s8 (int8_t * __addr, int8x16_t __value)
12325 {
12326 __builtin_mve_vstrbq_sv16qi ((__builtin_neon_qi *) __addr, __value);
12327 }
12328
12329 __extension__ extern __inline void
12330 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12331 __arm_vstrbq_s32 (int8_t * __addr, int32x4_t __value)
12332 {
12333 __builtin_mve_vstrbq_sv4si ((__builtin_neon_qi *) __addr, __value);
12334 }
12335
12336 __extension__ extern __inline void
12337 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12338 __arm_vstrbq_s16 (int8_t * __addr, int16x8_t __value)
12339 {
12340 __builtin_mve_vstrbq_sv8hi ((__builtin_neon_qi *) __addr, __value);
12341 }
12342
12343 __extension__ extern __inline void
12344 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12345 __arm_vstrbq_u8 (uint8_t * __addr, uint8x16_t __value)
12346 {
12347 __builtin_mve_vstrbq_uv16qi ((__builtin_neon_qi *) __addr, __value);
12348 }
12349
12350 __extension__ extern __inline void
12351 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12352 __arm_vstrbq_u32 (uint8_t * __addr, uint32x4_t __value)
12353 {
12354 __builtin_mve_vstrbq_uv4si ((__builtin_neon_qi *) __addr, __value);
12355 }
12356
12357 __extension__ extern __inline void
12358 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12359 __arm_vstrbq_u16 (uint8_t * __addr, uint16x8_t __value)
12360 {
12361 __builtin_mve_vstrbq_uv8hi ((__builtin_neon_qi *) __addr, __value);
12362 }
12363
/* Word scatter-stores with a vector of base addresses (vstrwq_scatter_base):
   __addr holds per-lane addresses, __offset is an immediate byte offset.  */
12364 __extension__ extern __inline void
12365 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12366 __arm_vstrwq_scatter_base_s32 (uint32x4_t __addr, const int __offset, int32x4_t __value)
12367 {
12368 __builtin_mve_vstrwq_scatter_base_sv4si (__addr, __offset, __value);
12369 }
12370
12371 __extension__ extern __inline void
12372 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12373 __arm_vstrwq_scatter_base_u32 (uint32x4_t __addr, const int __offset, uint32x4_t __value)
12374 {
12375 __builtin_mve_vstrwq_scatter_base_uv4si (__addr, __offset, __value);
12376 }
12377
/* Byte load intrinsics: vldrbq_gather_offset_* gather bytes from
   __base[__offset[i]] (widening to 16/32-bit lanes for the non-8-bit
   element types), vldrbq_* load contiguously, and
   vldrwq_gather_base_* gather words from a vector of base addresses
   plus an immediate offset.  */
12378 __extension__ extern __inline uint8x16_t
12379 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12380 __arm_vldrbq_gather_offset_u8 (uint8_t const * __base, uint8x16_t __offset)
12381 {
12382 return __builtin_mve_vldrbq_gather_offset_uv16qi ((__builtin_neon_qi *) __base, __offset);
12383 }
12384
12385 __extension__ extern __inline int8x16_t
12386 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12387 __arm_vldrbq_gather_offset_s8 (int8_t const * __base, uint8x16_t __offset)
12388 {
12389 return __builtin_mve_vldrbq_gather_offset_sv16qi ((__builtin_neon_qi *) __base, __offset);
12390 }
12391
12392 __extension__ extern __inline int8x16_t
12393 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12394 __arm_vldrbq_s8 (int8_t const * __base)
12395 {
12396 return __builtin_mve_vldrbq_sv16qi ((__builtin_neon_qi *) __base);
12397 }
12398
12399 __extension__ extern __inline uint8x16_t
12400 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12401 __arm_vldrbq_u8 (uint8_t const * __base)
12402 {
12403 return __builtin_mve_vldrbq_uv16qi ((__builtin_neon_qi *) __base);
12404 }
12405
12406 __extension__ extern __inline uint16x8_t
12407 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12408 __arm_vldrbq_gather_offset_u16 (uint8_t const * __base, uint16x8_t __offset)
12409 {
12410 return __builtin_mve_vldrbq_gather_offset_uv8hi ((__builtin_neon_qi *) __base, __offset);
12411 }
12412
12413 __extension__ extern __inline int16x8_t
12414 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12415 __arm_vldrbq_gather_offset_s16 (int8_t const * __base, uint16x8_t __offset)
12416 {
12417 return __builtin_mve_vldrbq_gather_offset_sv8hi ((__builtin_neon_qi *) __base, __offset);
12418 }
12419
12420 __extension__ extern __inline int16x8_t
12421 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12422 __arm_vldrbq_s16 (int8_t const * __base)
12423 {
12424 return __builtin_mve_vldrbq_sv8hi ((__builtin_neon_qi *) __base);
12425 }
12426
12427 __extension__ extern __inline uint16x8_t
12428 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12429 __arm_vldrbq_u16 (uint8_t const * __base)
12430 {
12431 return __builtin_mve_vldrbq_uv8hi ((__builtin_neon_qi *) __base);
12432 }
12433
12434 __extension__ extern __inline uint32x4_t
12435 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12436 __arm_vldrbq_gather_offset_u32 (uint8_t const * __base, uint32x4_t __offset)
12437 {
12438 return __builtin_mve_vldrbq_gather_offset_uv4si ((__builtin_neon_qi *) __base, __offset);
12439 }
12440
12441 __extension__ extern __inline int32x4_t
12442 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12443 __arm_vldrbq_gather_offset_s32 (int8_t const * __base, uint32x4_t __offset)
12444 {
12445 return __builtin_mve_vldrbq_gather_offset_sv4si ((__builtin_neon_qi *) __base, __offset);
12446 }
12447
12448 __extension__ extern __inline int32x4_t
12449 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12450 __arm_vldrbq_s32 (int8_t const * __base)
12451 {
12452 return __builtin_mve_vldrbq_sv4si ((__builtin_neon_qi *) __base);
12453 }
12454
12455 __extension__ extern __inline uint32x4_t
12456 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12457 __arm_vldrbq_u32 (uint8_t const * __base)
12458 {
12459 return __builtin_mve_vldrbq_uv4si ((__builtin_neon_qi *) __base);
12460 }
12461
12462 __extension__ extern __inline int32x4_t
12463 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12464 __arm_vldrwq_gather_base_s32 (uint32x4_t __addr, const int __offset)
12465 {
12466 return __builtin_mve_vldrwq_gather_base_sv4si (__addr, __offset);
12467 }
12468
12469 __extension__ extern __inline uint32x4_t
12470 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12471 __arm_vldrwq_gather_base_u32 (uint32x4_t __addr, const int __offset)
12472 {
12473 return __builtin_mve_vldrwq_gather_base_uv4si (__addr, __offset);
12474 }
12475
/* Predicated byte stores: vstrbq_p_* contiguous stores and
   vstrbq_scatter_offset_p_* scatter stores, each gated per lane by the
   predicate __p and forwarded to the corresponding _p builtin.  */
12476 __extension__ extern __inline void
12477 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12478 __arm_vstrbq_p_s8 (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
12479 {
12480 __builtin_mve_vstrbq_p_sv16qi ((__builtin_neon_qi *) __addr, __value, __p);
12481 }
12482
12483 __extension__ extern __inline void
12484 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12485 __arm_vstrbq_p_s32 (int8_t * __addr, int32x4_t __value, mve_pred16_t __p)
12486 {
12487 __builtin_mve_vstrbq_p_sv4si ((__builtin_neon_qi *) __addr, __value, __p);
12488 }
12489
12490 __extension__ extern __inline void
12491 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12492 __arm_vstrbq_p_s16 (int8_t * __addr, int16x8_t __value, mve_pred16_t __p)
12493 {
12494 __builtin_mve_vstrbq_p_sv8hi ((__builtin_neon_qi *) __addr, __value, __p);
12495 }
12496
12497 __extension__ extern __inline void
12498 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12499 __arm_vstrbq_p_u8 (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
12500 {
12501 __builtin_mve_vstrbq_p_uv16qi ((__builtin_neon_qi *) __addr, __value, __p);
12502 }
12503
12504 __extension__ extern __inline void
12505 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12506 __arm_vstrbq_p_u32 (uint8_t * __addr, uint32x4_t __value, mve_pred16_t __p)
12507 {
12508 __builtin_mve_vstrbq_p_uv4si ((__builtin_neon_qi *) __addr, __value, __p);
12509 }
12510
12511 __extension__ extern __inline void
12512 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12513 __arm_vstrbq_p_u16 (uint8_t * __addr, uint16x8_t __value, mve_pred16_t __p)
12514 {
12515 __builtin_mve_vstrbq_p_uv8hi ((__builtin_neon_qi *) __addr, __value, __p);
12516 }
12517
12518 __extension__ extern __inline void
12519 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12520 __arm_vstrbq_scatter_offset_p_s8 (int8_t * __base, uint8x16_t __offset, int8x16_t __value, mve_pred16_t __p)
12521 {
12522 __builtin_mve_vstrbq_scatter_offset_p_sv16qi ((__builtin_neon_qi *) __base, __offset, __value, __p);
12523 }
12524
12525 __extension__ extern __inline void
12526 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12527 __arm_vstrbq_scatter_offset_p_s32 (int8_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
12528 {
12529 __builtin_mve_vstrbq_scatter_offset_p_sv4si ((__builtin_neon_qi *) __base, __offset, __value, __p);
12530 }
12531
12532 __extension__ extern __inline void
12533 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12534 __arm_vstrbq_scatter_offset_p_s16 (int8_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
12535 {
12536 __builtin_mve_vstrbq_scatter_offset_p_sv8hi ((__builtin_neon_qi *) __base, __offset, __value, __p);
12537 }
12538
12539 __extension__ extern __inline void
12540 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12541 __arm_vstrbq_scatter_offset_p_u8 (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value, mve_pred16_t __p)
12542 {
12543 __builtin_mve_vstrbq_scatter_offset_p_uv16qi ((__builtin_neon_qi *) __base, __offset, __value, __p);
12544 }
12545
12546 __extension__ extern __inline void
12547 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12548 __arm_vstrbq_scatter_offset_p_u32 (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
12549 {
12550 __builtin_mve_vstrbq_scatter_offset_p_uv4si ((__builtin_neon_qi *) __base, __offset, __value, __p);
12551 }
12552
12553 __extension__ extern __inline void
12554 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12555 __arm_vstrbq_scatter_offset_p_u16 (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
12556 {
12557 __builtin_mve_vstrbq_scatter_offset_p_uv8hi ((__builtin_neon_qi *) __base, __offset, __value, __p);
12558 }
12559
/* Predicated word scatter-stores against a vector of base addresses
   (VSTRWT): for each lane enabled in __p, store the corresponding word of
   __value at the per-lane address in __addr plus the immediate __offset.
   __offset must be a compile-time constant (checked by the builtin).  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_p_s32 (uint32x4_t __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_base_p_sv4si (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_p_u32 (uint32x4_t __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_base_p_uv4si (__addr, __offset, __value, __p);
}
12573
/* Predicated byte gather-loads with vector offsets (VLDRBT): for each
   lane enabled in __p, load a byte from __base plus the per-lane offset;
   the _z suffix denotes zeroing predication (inactive lanes of the result
   are zeroed, per the Arm MVE ACLE convention).  Variants returning wider
   element types widen each loaded byte to the result lane width, with
   signedness given by the _s/_u suffix.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_z_s8 (int8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrbq_gather_offset_z_sv16qi ((__builtin_neon_qi *) __base, __offset, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_z_s32 (int8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrbq_gather_offset_z_sv4si ((__builtin_neon_qi *) __base, __offset, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_z_s16 (int8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrbq_gather_offset_z_sv8hi ((__builtin_neon_qi *) __base, __offset, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_z_u8 (uint8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrbq_gather_offset_z_uv16qi ((__builtin_neon_qi *) __base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_z_u32 (uint8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrbq_gather_offset_z_uv4si ((__builtin_neon_qi *) __base, __offset, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_gather_offset_z_u16 (uint8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrbq_gather_offset_z_uv8hi ((__builtin_neon_qi *) __base, __offset, __p);
}
12615
/* Predicated contiguous byte loads (VLDRBT): load consecutive bytes from
   __base for lanes enabled in __p; _z = zeroing predication (inactive
   result lanes zeroed, per the Arm MVE ACLE convention).  Widening
   variants extend each byte to the result lane width (signedness per the
   _s/_u suffix).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_z_s8 (int8_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrbq_z_sv16qi ((__builtin_neon_qi *) __base, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_z_s32 (int8_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrbq_z_sv4si ((__builtin_neon_qi *) __base, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_z_s16 (int8_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrbq_z_sv8hi ((__builtin_neon_qi *) __base, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_z_u8 (uint8_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrbq_z_uv16qi ((__builtin_neon_qi *) __base, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_z_u32 (uint8_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrbq_z_uv4si ((__builtin_neon_qi *) __base, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrbq_z_u16 (uint8_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrbq_z_uv8hi ((__builtin_neon_qi *) __base, __p);
}
12657
/* Predicated word gather-loads against a vector of base addresses
   (VLDRWT): for each lane enabled in __p, load a word from the per-lane
   address in __addr plus the constant immediate __offset; inactive result
   lanes are zeroed (_z, per the Arm MVE ACLE convention).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_z_s32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_base_z_sv4si (__addr, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_z_u32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_base_z_uv2di (__addr, __offset, __p);
}
12671
/* Unpredicated contiguous vector loads (VLD1 / vld1q): load one full
   vector of elements from __base.  Element type matches the pointee type;
   no widening or predication.  The casts adapt the user-visible pointer
   types to the builtin's expected __builtin_neon_* pointer types.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_s8 (int8_t const * __base)
{
  return __builtin_mve_vld1q_sv16qi ((__builtin_neon_qi *) __base);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_s32 (int32_t const * __base)
{
  return __builtin_mve_vld1q_sv4si ((__builtin_neon_si *) __base);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_s16 (int16_t const * __base)
{
  return __builtin_mve_vld1q_sv8hi ((__builtin_neon_hi *) __base);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_u8 (uint8_t const * __base)
{
  return __builtin_mve_vld1q_uv16qi ((__builtin_neon_qi *) __base);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_u32 (uint32_t const * __base)
{
  return __builtin_mve_vld1q_uv4si ((__builtin_neon_si *) __base);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_u16 (uint16_t const * __base)
{
  return __builtin_mve_vld1q_uv8hi ((__builtin_neon_hi *) __base);
}
12713
/* Halfword gather-loads with vector offsets (VLDRH):
   - "offset" variants use the per-lane __offset as a raw byte offset;
   - "shifted_offset" variants scale the offset by the element size
     (offset << 1 for halfwords) before adding it to __base;
   - "_z" variants are predicated, zeroing inactive result lanes
     (per the Arm MVE ACLE convention);
   - variants whose result lanes are wider than 16 bits widen each loaded
     halfword, signedness per the _s/_u suffix.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_s32 (int16_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrhq_gather_offset_sv4si ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_s16 (int16_t const * __base, uint16x8_t __offset)
{
  return __builtin_mve_vldrhq_gather_offset_sv8hi ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_u32 (uint16_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrhq_gather_offset_uv4si ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_u16 (uint16_t const * __base, uint16x8_t __offset)
{
  return __builtin_mve_vldrhq_gather_offset_uv8hi ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_z_s32 (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_offset_z_sv4si ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_z_s16 (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_offset_z_sv8hi ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_z_u32 (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_offset_z_uv4si ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_z_u16 (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_offset_z_uv8hi ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_s32 (int16_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_sv4si ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_s16 (int16_t const * __base, uint16x8_t __offset)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_sv8hi ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_u32 (uint16_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_uv4si ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_u16 (uint16_t const * __base, uint16x8_t __offset)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_uv8hi ((__builtin_neon_hi *) __base, __offset);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_z_s32 (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_z_sv4si ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_z_s16 (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_z_sv8hi ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_z_u32 (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_z_uv4si ((__builtin_neon_hi *) __base, __offset, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_z_u16 (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_z_uv8hi ((__builtin_neon_hi *) __base, __offset, __p);
}
12825
/* Contiguous halfword loads (VLDRH): load consecutive halfwords from
   __base.  Variants returning 32-bit lanes widen each halfword
   (signedness per the _s/_u suffix).  "_z" variants are predicated by
   __p, zeroing inactive result lanes (per the Arm MVE ACLE convention).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_s32 (int16_t const * __base)
{
  return __builtin_mve_vldrhq_sv4si ((__builtin_neon_hi *) __base);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_s16 (int16_t const * __base)
{
  return __builtin_mve_vldrhq_sv8hi ((__builtin_neon_hi *) __base);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_u32 (uint16_t const * __base)
{
  return __builtin_mve_vldrhq_uv4si ((__builtin_neon_hi *) __base);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_u16 (uint16_t const * __base)
{
  return __builtin_mve_vldrhq_uv8hi ((__builtin_neon_hi *) __base);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_z_s32 (int16_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_z_sv4si ((__builtin_neon_hi *) __base, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_z_s16 (int16_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_z_sv8hi ((__builtin_neon_hi *) __base, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_z_u32 (uint16_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_z_uv4si ((__builtin_neon_hi *) __base, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_z_u16 (uint16_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_z_uv8hi ((__builtin_neon_hi *) __base, __p);
}
12881
/* Contiguous word loads (VLDRW): load four consecutive 32-bit words from
   __base.  "_z" variants are predicated by __p, zeroing inactive result
   lanes (per the Arm MVE ACLE convention).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_s32 (int32_t const * __base)
{
  return __builtin_mve_vldrwq_sv4si ((__builtin_neon_si *) __base);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_u32 (uint32_t const * __base)
{
  return __builtin_mve_vldrwq_uv4si ((__builtin_neon_si *) __base);
}


__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_z_s32 (int32_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_z_sv4si ((__builtin_neon_si *) __base, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_z_u32 (uint32_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_z_uv4si ((__builtin_neon_si *) __base, __p);
}
12910
/* Doubleword gather-loads (VLDRD):
   - "gather_base" variants load from the per-lane addresses in __addr
     plus the constant immediate __offset;
   - "gather_offset" variants load from __base plus per-lane byte offsets;
   - "gather_shifted_offset" variants scale the per-lane offset by the
     element size (offset << 3 for doublewords) before adding to __base;
   - "_z" variants are predicated by __p, zeroing inactive result lanes
     (per the Arm MVE ACLE convention).  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_s64 (uint64x2_t __addr, const int __offset)
{
  return __builtin_mve_vldrdq_gather_base_sv2di (__addr, __offset);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_u64 (uint64x2_t __addr, const int __offset)
{
  return __builtin_mve_vldrdq_gather_base_uv2di (__addr, __offset);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_z_s64 (uint64x2_t __addr, const int __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_base_z_sv2di (__addr, __offset, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_z_u64 (uint64x2_t __addr, const int __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_base_z_uv2di (__addr, __offset, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset_s64 (int64_t const * __base, uint64x2_t __offset)
{
  return __builtin_mve_vldrdq_gather_offset_sv2di ((__builtin_neon_di *) __base, __offset);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset_u64 (uint64_t const * __base, uint64x2_t __offset)
{
  return __builtin_mve_vldrdq_gather_offset_uv2di ((__builtin_neon_di *) __base, __offset);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset_z_s64 (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_offset_z_sv2di ((__builtin_neon_di *) __base, __offset, __p);
}


__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_offset_z_u64 (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_offset_z_uv2di ((__builtin_neon_di *) __base, __offset, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset_s64 (int64_t const * __base, uint64x2_t __offset)
{
  return __builtin_mve_vldrdq_gather_shifted_offset_sv2di ((__builtin_neon_di *) __base, __offset);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset_u64 (uint64_t const * __base, uint64x2_t __offset)
{
  return __builtin_mve_vldrdq_gather_shifted_offset_uv2di ((__builtin_neon_di *) __base, __offset);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset_z_s64 (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_shifted_offset_z_sv2di ((__builtin_neon_di *) __base, __offset, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_shifted_offset_z_u64 (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrdq_gather_shifted_offset_z_uv2di ((__builtin_neon_di *) __base, __offset, __p);
}
12995
/* Word gather-loads with vector offsets (VLDRW): load a word per lane
   from __base plus the per-lane offset.  "shifted_offset" variants scale
   the offset by the element size (offset << 2 for words) first.  "_z"
   variants are predicated by __p, zeroing inactive result lanes (per the
   Arm MVE ACLE convention).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_s32 (int32_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrwq_gather_offset_sv4si ((__builtin_neon_si *) __base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_u32 (uint32_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrwq_gather_offset_uv4si ((__builtin_neon_si *) __base, __offset);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_z_s32 (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_offset_z_sv4si ((__builtin_neon_si *) __base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_z_u32 (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_offset_z_uv4si ((__builtin_neon_si *) __base, __offset, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_s32 (int32_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrwq_gather_shifted_offset_sv4si ((__builtin_neon_si *) __base, __offset);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_u32 (uint32_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrwq_gather_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_z_s32 (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_shifted_offset_z_sv4si ((__builtin_neon_si *) __base, __offset, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_z_u32 (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_shifted_offset_z_uv4si ((__builtin_neon_si *) __base, __offset, __p);
}
13051
/* Unpredicated contiguous vector stores (VST1 / vst1q): store one full
   vector of elements to __addr.  Element type matches the pointee type;
   no narrowing or predication.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_s8 (int8_t * __addr, int8x16_t __value)
{
  __builtin_mve_vst1q_sv16qi ((__builtin_neon_qi *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_s32 (int32_t * __addr, int32x4_t __value)
{
  __builtin_mve_vst1q_sv4si ((__builtin_neon_si *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_s16 (int16_t * __addr, int16x8_t __value)
{
  __builtin_mve_vst1q_sv8hi ((__builtin_neon_hi *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_u8 (uint8_t * __addr, uint8x16_t __value)
{
  __builtin_mve_vst1q_uv16qi ((__builtin_neon_qi *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_u32 (uint32_t * __addr, uint32x4_t __value)
{
  __builtin_mve_vst1q_uv4si ((__builtin_neon_si *) __addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_u16 (uint16_t * __addr, uint16x8_t __value)
{
  __builtin_mve_vst1q_uv8hi ((__builtin_neon_hi *) __addr, __value);
}
13093
/* Halfword scatter-stores with vector offsets (VSTRH): store the
   least-significant halfword of each element of __value at __base plus
   the per-lane offset.  "shifted_offset" variants scale the offset by the
   element size (offset << 1 for halfwords) first.  "_p" variants store
   only lanes enabled in predicate __p.  Variants taking 32-bit value
   lanes narrow each element to 16 bits on store (implied by the value
   type versus the uint16_t/int16_t base pointer).  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __builtin_mve_vstrhq_scatter_offset_sv4si ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
{
  __builtin_mve_vstrhq_scatter_offset_sv8hi ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __builtin_mve_vstrhq_scatter_offset_uv4si ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
{
  __builtin_mve_vstrhq_scatter_offset_uv8hi ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_offset_p_sv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_offset_p_sv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_offset_p_uv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_offset_p_uv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_sv4si ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_sv8hi ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_uv4si ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_uv8hi ((__builtin_neon_hi *) __base, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_p_sv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_p_sv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_p_uv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_p_uv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
}
13205
/* Contiguous halfword stores (VSTRH): store each lane of __value as a
   16-bit element at consecutive halfwords starting at __addr.  For the
   32-bit-element variants the builtin's vector mode (v4si) implies the
   lanes are narrowed to 16 bits on store.  Thin builtin wrappers.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_s32 (int16_t * __addr, int32x4_t __value)
{
  __builtin_mve_vstrhq_sv4si ((__builtin_neon_hi *) __addr, __value);
}

/* s16 variant: one halfword per lane, no narrowing involved.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_s16 (int16_t * __addr, int16x8_t __value)
{
  __builtin_mve_vstrhq_sv8hi ((__builtin_neon_hi *) __addr, __value);
}

/* u32 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_u32 (uint16_t * __addr, uint32x4_t __value)
{
  __builtin_mve_vstrhq_uv4si ((__builtin_neon_hi *) __addr, __value);
}

/* u16 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_u16 (uint16_t * __addr, uint16x8_t __value)
{
  __builtin_mve_vstrhq_uv8hi ((__builtin_neon_hi *) __addr, __value);
}
13233
/* Predicated contiguous halfword stores: as __arm_vstrhq_* above, but
   only lanes whose predicate bits in __p are set are written.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p_s32 (int16_t * __addr, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_p_sv4si ((__builtin_neon_hi *) __addr, __value, __p);
}

/* s16 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p_s16 (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_p_sv8hi ((__builtin_neon_hi *) __addr, __value, __p);
}

/* u32 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p_u32 (uint16_t * __addr, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_p_uv4si ((__builtin_neon_hi *) __addr, __value, __p);
}

/* u16 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p_u16 (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_p_uv8hi ((__builtin_neon_hi *) __addr, __value, __p);
}
13261
/* Contiguous word stores (VSTRW), plain and predicated (_p): store each
   (active) 32-bit lane of __value at consecutive words from __addr.
   Pointer is cast to the builtin's __builtin_neon_si element type.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_s32 (int32_t * __addr, int32x4_t __value)
{
  __builtin_mve_vstrwq_sv4si ((__builtin_neon_si *) __addr, __value);
}

/* u32 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_u32 (uint32_t * __addr, uint32x4_t __value)
{
  __builtin_mve_vstrwq_uv4si ((__builtin_neon_si *) __addr, __value);
}

/* Predicated s32 variant: only lanes selected by __p are written.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_p_s32 (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_p_sv4si ((__builtin_neon_si *) __addr, __value, __p);
}

/* Predicated u32 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_p_u32 (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_p_uv4si ((__builtin_neon_si *) __addr, __value, __p);
}
13289
/* Doubleword scatter stores to vector base addresses (VSTRD): each lane
   of __value is stored at __addr[i] + __offset, where __addr holds the
   per-lane base addresses and __offset is an immediate byte offset
   applied to all lanes (range constraints enforced by the builtin).
   _p variants store only lanes selected by the predicate __p.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_p_s64 (uint64x2_t __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrdq_scatter_base_p_sv2di (__addr, __offset, __value, __p);
}

/* Predicated u64 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_p_u64 (uint64x2_t __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrdq_scatter_base_p_uv2di (__addr, __offset, __value, __p);
}

/* Unpredicated s64 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_s64 (uint64x2_t __addr, const int __offset, int64x2_t __value)
{
  __builtin_mve_vstrdq_scatter_base_sv2di (__addr, __offset, __value);
}

/* Unpredicated u64 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_u64 (uint64x2_t __addr, const int __offset, uint64x2_t __value)
{
  __builtin_mve_vstrdq_scatter_base_uv2di (__addr, __offset, __value);
}
13317
/* Doubleword scatter stores with per-lane byte offsets: each (active)
   lane of __value is stored at __base + __offset[i] bytes (unscaled —
   contrast the _shifted_offset forms below).  Pointer is cast to the
   builtin's __builtin_neon_di element type.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_offset_p_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrdq_scatter_offset_p_sv2di ((__builtin_neon_di *) __base, __offset, __value, __p);
}

/* Predicated u64 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_offset_p_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrdq_scatter_offset_p_uv2di ((__builtin_neon_di *) __base, __offset, __value, __p);
}

/* Unpredicated s64 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_offset_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value)
{
  __builtin_mve_vstrdq_scatter_offset_sv2di ((__builtin_neon_di *) __base, __offset, __value);
}

/* Unpredicated u64 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_offset_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value)
{
  __builtin_mve_vstrdq_scatter_offset_uv2di ((__builtin_neon_di *) __base, __offset, __value);
}
13345
/* Doubleword scatter stores with shifted (element-scaled) offsets:
   like the _offset forms above, but each lane's offset is scaled to
   doubleword units — NOTE(review): scaling per MVE VSTRD spec, not
   visible here; confirm against the ISA.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_shifted_offset_p_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrdq_scatter_shifted_offset_p_sv2di ((__builtin_neon_di *) __base, __offset, __value, __p);
}

/* Predicated u64 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_shifted_offset_p_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrdq_scatter_shifted_offset_p_uv2di ((__builtin_neon_di *) __base, __offset, __value, __p);
}

/* Unpredicated s64 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_shifted_offset_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value)
{
  __builtin_mve_vstrdq_scatter_shifted_offset_sv2di ((__builtin_neon_di *) __base, __offset, __value);
}

/* Unpredicated u64 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_shifted_offset_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value)
{
  __builtin_mve_vstrdq_scatter_shifted_offset_uv2di ((__builtin_neon_di *) __base, __offset, __value);
}
13373
/* Word scatter stores with per-lane byte offsets (VSTRW): each (active)
   32-bit lane of __value is stored at __base + __offset[i] bytes.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_p_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_offset_p_sv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
}

/* Predicated u32 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_p_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_offset_p_uv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
}

/* Unpredicated s32 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __builtin_mve_vstrwq_scatter_offset_sv4si ((__builtin_neon_si *) __base, __offset, __value);
}

/* Unpredicated u32 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __builtin_mve_vstrwq_scatter_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value);
}
13401
/* Word scatter stores with shifted (word-scaled) offsets — NOTE(review):
   scaling per MVE VSTRW spec, confirm against the ISA.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_p_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_shifted_offset_p_sv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
}

/* Predicated u32 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_p_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_shifted_offset_p_uv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
}

/* Unpredicated s32 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
{
  __builtin_mve_vstrwq_scatter_shifted_offset_sv4si ((__builtin_neon_si *) __base, __offset, __value);
}

/* Unpredicated u32 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
{
  __builtin_mve_vstrwq_scatter_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value);
}
13429
/* Lane-wise vector addition (VADD).  These use GCC's generic vector
   arithmetic (__a + __b) rather than a builtin; the vector extension
   guarantees element-wise wrapping addition, which the compiler lowers
   to the MVE add instruction.  One variant per element type/width.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __a + __b;
}

/* s16 variant.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __a + __b;
}

/* s32 variant.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __a + __b;
}

/* u8 variant.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __a + __b;
}

/* u16 variant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __a + __b;
}

/* u32 variant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __a + __b;
}
13471
/* Predicated decrementing duplicate (VDDUP) starting from scalar __a
   with step __imm; inactive lanes (per __p) are taken from __inactive.
   Direct builtin wrappers, no writeback.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vddupq_m_n_uv16qi (__inactive, __a, __imm, __p);
}

/* u32 variant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vddupq_m_n_uv4si (__inactive, __a, __imm, __p);
}

/* u16 variant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vddupq_m_n_uv8hi (__inactive, __a, __imm, __p);
}
13492
/* Predicated decrementing duplicate with scalar writeback: computes the
   vector via the _m_n builtin from *__a, then decrements *__a by
   __imm times the lane count (16/8/4) to emulate the instruction's
   register writeback.  Note the writeback is applied unconditionally,
   regardless of the predicate.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  uint8x16_t __res = __builtin_mve_vddupq_m_n_uv16qi (__inactive, * __a, __imm, __p);
  *__a -= __imm * 16u;
  return __res;
}

/* u16 variant: 8 lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  uint16x8_t __res = __builtin_mve_vddupq_m_n_uv8hi (__inactive, *__a, __imm, __p);
  *__a -= __imm * 8u;
  return __res;
}

/* u32 variant: 4 lanes.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  uint32x4_t __res = __builtin_mve_vddupq_m_n_uv4si (__inactive, *__a, __imm, __p);
  *__a -= __imm * 4u;
  return __res;
}
13519
/* Unpredicated decrementing duplicate: build a vector counting down
   from __a in steps of __imm.  Direct builtin wrappers.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_n_u8 (uint32_t __a, const int __imm)
{
  return __builtin_mve_vddupq_n_uv16qi (__a, __imm);
}

/* u32 variant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_n_u32 (uint32_t __a, const int __imm)
{
  return __builtin_mve_vddupq_n_uv4si (__a, __imm);
}

/* u16 variant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_n_u16 (uint32_t __a, const int __imm)
{
  return __builtin_mve_vddupq_n_uv8hi (__a, __imm);
}
13540
/* Predicated decrementing-with-wrap duplicate (VDWDUP): counts down
   from __a with step __imm, wrapping at __b.  The builtin takes the
   wrap limit packed into the high 32 bits of a 64-bit value, hence
   the (uint64_t) __b << 32 packing.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_vdwdupq_m_n_uv16qi (__inactive, __a, __c, __imm, __p);
}

/* u32 variant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_vdwdupq_m_n_uv4si (__inactive, __a, __c, __imm, __p);
}

/* u16 variant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_vdwdupq_m_n_uv8hi (__inactive, __a, __c, __imm, __p);
}
13564
/* Predicated VDWDUP with scalar writeback: the _m_n builtin produces
   the result vector from *__a, and a separate _m_wb builtin computes
   the updated scalar written back through __a.  Both calls must use
   the original *__a, so the result is computed before the update.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint8x16_t __res = __builtin_mve_vdwdupq_m_n_uv16qi (__inactive, *__a, __c, __imm, __p);
  *__a = __builtin_mve_vdwdupq_m_wb_uv16qi (__inactive, *__a, __c, __imm, __p);
  return __res;
}

/* u32 variant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint32x4_t __res = __builtin_mve_vdwdupq_m_n_uv4si (__inactive, *__a, __c, __imm, __p);
  *__a = __builtin_mve_vdwdupq_m_wb_uv4si (__inactive, *__a, __c, __imm, __p);
  return __res;
}

/* u16 variant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint16x8_t __res = __builtin_mve_vdwdupq_m_n_uv8hi (__inactive, *__a, __c, __imm, __p);
  *__a = __builtin_mve_vdwdupq_m_wb_uv8hi (__inactive, *__a, __c, __imm, __p);
  return __res;
}
13594
/* Unpredicated VDWDUP: count down from __a with step __imm, wrapping
   at __b (packed into the high half of a 64-bit builtin operand).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_n_u8 (uint32_t __a, uint32_t __b, const int __imm)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_vdwdupq_n_uv16qi (__a, __c, __imm);
}

/* u32 variant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_n_u32 (uint32_t __a, uint32_t __b, const int __imm)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_vdwdupq_n_uv4si (__a, __c, __imm);
}

/* u16 variant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_n_u16 (uint32_t __a, uint32_t __b, const int __imm)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_vdwdupq_n_uv8hi (__a, __c, __imm);
}
13618
/* Unpredicated VDWDUP with scalar writeback: result vector from the
   _n builtin, updated start value from the _wb builtin, both computed
   from the original *__a before the store-back.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_wb_u8 (uint32_t * __a, uint32_t __b, const int __imm)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint8x16_t __res = __builtin_mve_vdwdupq_n_uv16qi (*__a, __c, __imm);
  *__a = __builtin_mve_vdwdupq_wb_uv16qi (*__a, __c, __imm);
  return __res;
}

/* u32 variant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_wb_u32 (uint32_t * __a, uint32_t __b, const int __imm)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint32x4_t __res = __builtin_mve_vdwdupq_n_uv4si (*__a, __c, __imm);
  *__a = __builtin_mve_vdwdupq_wb_uv4si (*__a, __c, __imm);
  return __res;
}

/* u16 variant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_wb_u16 (uint32_t * __a, uint32_t __b, const int __imm)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint16x8_t __res = __builtin_mve_vdwdupq_n_uv8hi (*__a, __c, __imm);
  *__a = __builtin_mve_vdwdupq_wb_uv8hi (*__a, __c, __imm);
  return __res;
}
13648
/* Predicated incrementing duplicate (VIDUP) from scalar __a with step
   __imm; inactive lanes come from __inactive.  Direct builtin wrappers.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vidupq_m_n_uv16qi (__inactive, __a, __imm, __p);
}

/* u32 variant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vidupq_m_n_uv4si (__inactive, __a, __imm, __p);
}

/* u16 variant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vidupq_m_n_uv8hi (__inactive, __a, __imm, __p);
}
13669
/* Unpredicated incrementing duplicate, u8: vector counting up from __a
   in steps of __imm.  (u32/u16 variants appear after the _m_wb group.)  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_n_u8 (uint32_t __a, const int __imm)
{
  return __builtin_mve_vidupq_n_uv16qi (__a, __imm);
}
13676
/* Predicated incrementing duplicate with scalar writeback: result from
   the _m_n builtin on *__a, then *__a is advanced by __imm times the
   lane count (16/8/4).  Writeback is unconditional w.r.t. __p.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  uint8x16_t __res = __builtin_mve_vidupq_m_n_uv16qi (__inactive, *__a, __imm, __p);
  *__a += __imm * 16u;
  return __res;
}

/* u16 variant: 8 lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  uint16x8_t __res = __builtin_mve_vidupq_m_n_uv8hi (__inactive, *__a, __imm, __p);
  *__a += __imm * 8u;
  return __res;
}

/* u32 variant: 4 lanes.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
{
  uint32x4_t __res = __builtin_mve_vidupq_m_n_uv4si (__inactive, *__a, __imm, __p);
  *__a += __imm * 4u;
  return __res;
}
13703
/* Unpredicated incrementing duplicate, u32/u16 (u8 variant is above).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_n_u32 (uint32_t __a, const int __imm)
{
  return __builtin_mve_vidupq_n_uv4si (__a, __imm);
}

/* u16 variant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_n_u16 (uint32_t __a, const int __imm)
{
  return __builtin_mve_vidupq_n_uv8hi (__a, __imm);
}
13717
/* Unpredicated incrementing duplicate with scalar writeback: result
   from the _n builtin on *__a, then *__a advances by __imm times the
   lane count.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_wb_u8 (uint32_t * __a, const int __imm)
{
  uint8x16_t __res = __builtin_mve_vidupq_n_uv16qi (*__a, __imm);
  *__a += __imm * 16u;
  return __res;
}

/* u16 variant: 8 lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_wb_u16 (uint32_t * __a, const int __imm)
{
  uint16x8_t __res = __builtin_mve_vidupq_n_uv8hi (*__a, __imm);
  *__a += __imm * 8u;
  return __res;
}

/* u32 variant: 4 lanes.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_wb_u32 (uint32_t * __a, const int __imm)
{
  uint32x4_t __res = __builtin_mve_vidupq_n_uv4si (*__a, __imm);
  *__a += __imm * 4u;
  return __res;
}
13744
/* Unpredicated decrementing duplicate with scalar writeback: mirror of
   __arm_vidupq_wb_* with the scalar decremented instead.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_wb_u8 (uint32_t * __a, const int __imm)
{
  uint8x16_t __res = __builtin_mve_vddupq_n_uv16qi (*__a, __imm);
  *__a -= __imm * 16u;
  return __res;
}

/* u16 variant: 8 lanes.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_wb_u16 (uint32_t * __a, const int __imm)
{
  uint16x8_t __res = __builtin_mve_vddupq_n_uv8hi (*__a, __imm);
  *__a -= __imm * 8u;
  return __res;
}

/* u32 variant: 4 lanes.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_wb_u32 (uint32_t * __a, const int __imm)
{
  uint32x4_t __res = __builtin_mve_vddupq_n_uv4si (*__a, __imm);
  *__a -= __imm * 4u;
  return __res;
}
13771
/* Predicated incrementing-with-wrap duplicate (VIWDUP): counts up from
   __a with step __imm, wrapping at __b (packed into the high 32 bits
   of the builtin's 64-bit operand).  Inactive lanes from __inactive.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_viwdupq_m_n_uv16qi (__inactive, __a, __c, __imm, __p);
}

/* u32 variant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_viwdupq_m_n_uv4si (__inactive, __a, __c, __imm, __p);
}

/* u16 variant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_viwdupq_m_n_uv8hi (__inactive, __a, __c, __imm, __p);
}
13795
/* Predicated VIWDUP with scalar writeback: the _m_n builtin yields the
   vector result and the _m_wb builtin yields the updated scalar; both
   consume the original *__a before it is overwritten.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint8x16_t __res = __builtin_mve_viwdupq_m_n_uv16qi (__inactive, *__a, __c, __imm, __p);
  *__a = __builtin_mve_viwdupq_m_wb_uv16qi (__inactive, *__a, __c, __imm, __p);
  return __res;
}

/* u32 variant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint32x4_t __res = __builtin_mve_viwdupq_m_n_uv4si (__inactive, *__a, __c, __imm, __p);
  *__a = __builtin_mve_viwdupq_m_wb_uv4si (__inactive, *__a, __c, __imm, __p);
  return __res;
}

/* u16 variant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint16x8_t __res = __builtin_mve_viwdupq_m_n_uv8hi (__inactive, *__a, __c, __imm, __p);
  *__a = __builtin_mve_viwdupq_m_wb_uv8hi (__inactive, *__a, __c, __imm, __p);
  return __res;
}
13825
/* Unpredicated VIWDUP: count up from __a with step __imm, wrapping at
   __b (packed into the high half of a 64-bit builtin operand).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_n_u8 (uint32_t __a, uint32_t __b, const int __imm)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_viwdupq_n_uv16qi (__a, __c, __imm);
}

/* u32 variant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_n_u32 (uint32_t __a, uint32_t __b, const int __imm)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_viwdupq_n_uv4si (__a, __c, __imm);
}

/* u16 variant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_n_u16 (uint32_t __a, uint32_t __b, const int __imm)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_viwdupq_n_uv8hi (__a, __c, __imm);
}
13849
/* Unpredicated VIWDUP with scalar writeback: vector result from the
   _n builtin, updated start value from the _wb builtin, both computed
   from the original *__a before the store-back.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_wb_u8 (uint32_t * __a, uint32_t __b, const int __imm)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint8x16_t __res = __builtin_mve_viwdupq_n_uv16qi (*__a, __c, __imm);
  *__a = __builtin_mve_viwdupq_wb_uv16qi (*__a, __c, __imm);
  return __res;
}

/* u32 variant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_wb_u32 (uint32_t * __a, uint32_t __b, const int __imm)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint32x4_t __res = __builtin_mve_viwdupq_n_uv4si (*__a, __c, __imm);
  *__a = __builtin_mve_viwdupq_wb_uv4si (*__a, __c, __imm);
  return __res;
}

/* u16 variant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_wb_u16 (uint32_t * __a, uint32_t __b, const int __imm)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint16x8_t __res = __builtin_mve_viwdupq_n_uv8hi (*__a, __c, __imm);
  *__a = __builtin_mve_viwdupq_wb_uv8hi (*__a, __c, __imm);
  return __res;
}
13879
13880
/* Doubleword gather loads from vector base with writeback (VLDRD):
   the _nowb builtin loads the result vector from the per-lane base
   addresses in *__addr plus the immediate __offset, then the _wb
   builtin computes and stores back the updated base vector.  The load
   must use the original *__addr, hence the statement order.  _z
   variants are predicated (inactive lanes zeroed by the builtin).  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_wb_s64 (uint64x2_t * __addr, const int __offset)
{
  int64x2_t
  result = __builtin_mve_vldrdq_gather_base_nowb_sv2di (*__addr, __offset);
  *__addr = __builtin_mve_vldrdq_gather_base_wb_sv2di (*__addr, __offset);
  return result;
}

/* u64 variant.  */
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_wb_u64 (uint64x2_t * __addr, const int __offset)
{
  uint64x2_t
  result = __builtin_mve_vldrdq_gather_base_nowb_uv2di (*__addr, __offset);
  *__addr = __builtin_mve_vldrdq_gather_base_wb_uv2di (*__addr, __offset);
  return result;
}

/* Predicated s64 variant.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_wb_z_s64 (uint64x2_t * __addr, const int __offset, mve_pred16_t __p)
{
  int64x2_t
  result = __builtin_mve_vldrdq_gather_base_nowb_z_sv2di (*__addr, __offset, __p);
  *__addr = __builtin_mve_vldrdq_gather_base_wb_z_sv2di (*__addr, __offset, __p);
  return result;
}

/* Predicated u64 variant.  */
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrdq_gather_base_wb_z_u64 (uint64x2_t * __addr, const int __offset, mve_pred16_t __p)
{
  uint64x2_t
  result = __builtin_mve_vldrdq_gather_base_nowb_z_uv2di (*__addr, __offset, __p);
  *__addr = __builtin_mve_vldrdq_gather_base_wb_z_uv2di (*__addr, __offset, __p);
  return result;
}
13920
/* Word gather loads from vector base with writeback (VLDRW): same
   nowb-then-wb pattern as the doubleword versions above, on 32-bit
   elements.  _z variants are predicated.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_wb_s32 (uint32x4_t * __addr, const int __offset)
{
  int32x4_t
  result = __builtin_mve_vldrwq_gather_base_nowb_sv4si (*__addr, __offset);
  *__addr = __builtin_mve_vldrwq_gather_base_wb_sv4si (*__addr, __offset);
  return result;
}

/* u32 variant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_wb_u32 (uint32x4_t * __addr, const int __offset)
{
  uint32x4_t
  result = __builtin_mve_vldrwq_gather_base_nowb_uv4si (*__addr, __offset);
  *__addr = __builtin_mve_vldrwq_gather_base_wb_uv4si (*__addr, __offset);
  return result;
}

/* Predicated s32 variant.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_wb_z_s32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p)
{
  int32x4_t
  result = __builtin_mve_vldrwq_gather_base_nowb_z_sv4si (*__addr, __offset, __p);
  *__addr = __builtin_mve_vldrwq_gather_base_wb_z_sv4si (*__addr, __offset, __p);
  return result;
}

/* Predicated u32 variant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_wb_z_u32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p)
{
  uint32x4_t
  result = __builtin_mve_vldrwq_gather_base_nowb_z_uv4si (*__addr, __offset, __p);
  *__addr = __builtin_mve_vldrwq_gather_base_wb_z_uv4si (*__addr, __offset, __p);
  return result;
}
13960
/* Doubleword scatter stores to vector base with writeback: the builtin
   performs the store and returns the updated base-address vector,
   which is written back through __addr.  _p variants are predicated.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb_s64 (uint64x2_t * __addr, const int __offset, int64x2_t __value)
{
  *__addr = __builtin_mve_vstrdq_scatter_base_wb_sv2di (*__addr, __offset, __value);
}

/* u64 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb_u64 (uint64x2_t * __addr, const int __offset, uint64x2_t __value)
{
  *__addr = __builtin_mve_vstrdq_scatter_base_wb_uv2di (*__addr, __offset, __value);
}

/* Predicated s64 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb_p_s64 (uint64x2_t * __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
{
  *__addr = __builtin_mve_vstrdq_scatter_base_wb_p_sv2di (*__addr, __offset, __value, __p);
}

/* Predicated u64 variant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb_p_u64 (uint64x2_t * __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
{
  *__addr = __builtin_mve_vstrdq_scatter_base_wb_p_uv2di (*__addr, __offset, __value, __p);
}
13988
13989 __extension__ extern __inline void
13990 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13991 __arm_vstrwq_scatter_base_wb_p_s32 (uint32x4_t * __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
13992 {
13993 *__addr = __builtin_mve_vstrwq_scatter_base_wb_p_sv4si (*__addr, __offset, __value, __p);
13994 }
13995
13996 __extension__ extern __inline void
13997 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13998 __arm_vstrwq_scatter_base_wb_p_u32 (uint32x4_t * __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
13999 {
14000 *__addr = __builtin_mve_vstrwq_scatter_base_wb_p_uv4si (*__addr, __offset, __value, __p);
14001 }
14002
14003 __extension__ extern __inline void
14004 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14005 __arm_vstrwq_scatter_base_wb_s32 (uint32x4_t * __addr, const int __offset, int32x4_t __value)
14006 {
14007 *__addr = __builtin_mve_vstrwq_scatter_base_wb_sv4si (*__addr, __offset, __value);
14008 }
14009
14010 __extension__ extern __inline void
14011 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14012 __arm_vstrwq_scatter_base_wb_u32 (uint32x4_t * __addr, const int __offset, uint32x4_t __value)
14013 {
14014 *__addr = __builtin_mve_vstrwq_scatter_base_wb_uv4si (*__addr, __offset, __value);
14015 }
14016
/* Predicated-with-undefined ("_x") vector increment/decrement generators:
   VDDUP (decrement), VDWDUP (decrement with wrap), VIDUP (increment) and
   VIWDUP (increment with wrap).  The _x form merges inactive lanes from an
   uninitialized vector (their contents are undefined), hence the
   __arm_vuninitializedq_* merge source passed to the _m builtins.

   _n variants take the start value __a by value; _wb (writeback) variants
   take it through a pointer and update *__a past the generated sequence.
   For the non-wrapping VDDUP/VIDUP writebacks the new start is computed
   directly as *__a -/+ __imm * <number of lanes>; for the wrapping
   VDWDUP/VIWDUP writebacks a separate _wb builtin computes it.  The wrap
   bound __b is packed into the top half of a 64-bit operand __c, matching
   the builtin's expected encoding.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_n_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vddupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_n_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vddupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_n_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vddupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_wb_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  uint8x16_t __arg1 = __arm_vuninitializedq_u8 ();
  uint8x16_t __res = __builtin_mve_vddupq_m_n_uv16qi (__arg1, * __a, __imm, __p);
  /* 16 lanes decremented by __imm each: advance the start accordingly.  */
  *__a -= __imm * 16u;
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_wb_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  uint16x8_t __arg1 = __arm_vuninitializedq_u16 ();
  uint16x8_t __res = __builtin_mve_vddupq_m_n_uv8hi (__arg1, *__a, __imm, __p);
  *__a -= __imm * 8u;
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_wb_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  uint32x4_t __arg1 = __arm_vuninitializedq_u32 ();
  uint32x4_t __res = __builtin_mve_vddupq_m_n_uv4si (__arg1, *__a, __imm, __p);
  *__a -= __imm * 4u;
  return __res;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_n_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  /* Wrap bound lives in the upper 32 bits of the builtin's operand.  */
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_vdwdupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __c, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_n_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_vdwdupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __c, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_n_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_vdwdupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __c, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_wb_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint8x16_t __arg1 = __arm_vuninitializedq_u8 ();
  /* _m_n builtin yields the result vector; _m_wb yields the wrapped
     updated start, written back through __a.  */
  uint8x16_t __res = __builtin_mve_vdwdupq_m_n_uv16qi (__arg1, *__a, __c, __imm, __p);
  *__a = __builtin_mve_vdwdupq_m_wb_uv16qi (__arg1, *__a, __c, __imm, __p);
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_wb_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint16x8_t __arg1 = __arm_vuninitializedq_u16 ();
  uint16x8_t __res = __builtin_mve_vdwdupq_m_n_uv8hi (__arg1, *__a, __c, __imm, __p);
  *__a = __builtin_mve_vdwdupq_m_wb_uv8hi (__arg1, *__a, __c, __imm, __p);
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_wb_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint32x4_t __arg1 = __arm_vuninitializedq_u32 ();
  uint32x4_t __res = __builtin_mve_vdwdupq_m_n_uv4si (__arg1, *__a, __c, __imm, __p);
  *__a = __builtin_mve_vdwdupq_m_wb_uv4si (__arg1, *__a, __c, __imm, __p);
  return __res;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_n_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vidupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_n_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vidupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_n_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vidupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_wb_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  uint8x16_t __arg1 = __arm_vuninitializedq_u8 ();
  uint8x16_t __res = __builtin_mve_vidupq_m_n_uv16qi (__arg1, *__a, __imm, __p);
  /* 16 lanes incremented by __imm each: advance the start accordingly.  */
  *__a += __imm * 16u;
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_wb_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  uint16x8_t __arg1 = __arm_vuninitializedq_u16 ();
  uint16x8_t __res = __builtin_mve_vidupq_m_n_uv8hi (__arg1, *__a, __imm, __p);
  *__a += __imm * 8u;
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_wb_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  uint32x4_t __arg1 = __arm_vuninitializedq_u32 ();
  uint32x4_t __res = __builtin_mve_vidupq_m_n_uv4si (__arg1, *__a, __imm, __p);
  *__a += __imm * 4u;
  return __res;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_n_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_viwdupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __c, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_n_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_viwdupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __c, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_n_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  return __builtin_mve_viwdupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __c, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_wb_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint8x16_t __arg1 = __arm_vuninitializedq_u8 ();
  uint8x16_t __res = __builtin_mve_viwdupq_m_n_uv16qi (__arg1, *__a, __c, __imm, __p);
  *__a = __builtin_mve_viwdupq_m_wb_uv16qi (__arg1, *__a, __c, __imm, __p);
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_wb_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint16x8_t __arg1 = __arm_vuninitializedq_u16 ();
  uint16x8_t __res = __builtin_mve_viwdupq_m_n_uv8hi (__arg1, *__a, __c, __imm, __p);
  *__a = __builtin_mve_viwdupq_m_wb_uv8hi (__arg1, *__a, __c, __imm, __p);
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_wb_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  uint64_t __c = ((uint64_t) __b) << 32;
  uint32x4_t __arg1 = __arm_vuninitializedq_u32 ();
  uint32x4_t __res = __builtin_mve_viwdupq_m_n_uv4si (__arg1, *__a, __c, __imm, __p);
  *__a = __builtin_mve_viwdupq_m_wb_uv4si (__arg1, *__a, __c, __imm, __p);
  return __res;
}
14232
/* Predicated-with-undefined ("_x") broadcast (VDUP): replicate the scalar
   __a into every active lane; inactive lanes (per predicate __p) come from
   an uninitialized vector and are therefore undefined.  One variant per
   element type/width.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_s8 (int8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_s16 (int16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_s32 (int32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_u8 (uint8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_u16 (uint16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_u32 (uint32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
}
14274
/* Predicated-with-undefined ("_x") lane-wise binary operations VMIN, VMAX
   and VABD (absolute difference).  Active lanes hold op(__a, __b); inactive
   lanes (per predicate __p) are undefined, implemented by passing an
   uninitialized merge vector to the _m builtin.  Signed and unsigned
   variants for 8/16/32-bit elements.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14400
/* Predicated-with-undefined ("_x") VABS (absolute value, signed only),
   VADD (vector + vector, and _n vector + broadcast scalar) and VCLS (count
   leading sign bits, signed only).  Inactive lanes (per predicate __p) are
   undefined; implemented via the _m builtins with an uninitialized merge
   vector.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

/* _n variants: second operand is a scalar broadcast to all lanes.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclsq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclsq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_x_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclsq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}
14526
/* Predicated-with-undefined ("_x") VCLZ (count leading zeros; signed and
   unsigned) and VNEG (negate; signed only).  Inactive lanes (per predicate
   __p) are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vclzq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}
14589
/* Predicated-with-undefined ("_x") VMULH: lane-wise multiply returning the
   high half of the double-width product.  Inactive lanes (per predicate
   __p) are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulhq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14631
14632 __extension__ extern __inline uint16x8_t
14633 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14634 __arm_vmullbq_poly_x_p8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14635 {
14636 return __builtin_mve_vmullbq_poly_m_pv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14637 }
14638
14639 __extension__ extern __inline uint32x4_t
14640 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14641 __arm_vmullbq_poly_x_p16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14642 {
14643 return __builtin_mve_vmullbq_poly_m_pv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
14644 }
14645
14646 __extension__ extern __inline int16x8_t
14647 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14648 __arm_vmullbq_int_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
14649 {
14650 return __builtin_mve_vmullbq_int_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __b, __p);
14651 }
14652
14653 __extension__ extern __inline int32x4_t
14654 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14655 __arm_vmullbq_int_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
14656 {
14657 return __builtin_mve_vmullbq_int_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __b, __p);
14658 }
14659
14660 __extension__ extern __inline int64x2_t
14661 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14662 __arm_vmullbq_int_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
14663 {
14664 return __builtin_mve_vmullbq_int_m_sv4si (__arm_vuninitializedq_s64 (), __a, __b, __p);
14665 }
14666
14667 __extension__ extern __inline uint16x8_t
14668 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14669 __arm_vmullbq_int_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
14670 {
14671 return __builtin_mve_vmullbq_int_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
14672 }
14673
14674 __extension__ extern __inline uint32x4_t
14675 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14676 __arm_vmullbq_int_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
14677 {
14678 return __builtin_mve_vmullbq_int_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
14679 }
14680
14681 __extension__ extern __inline uint64x2_t
14682 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14683 __arm_vmullbq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
14684 {
14685 return __builtin_mve_vmullbq_int_m_uv4si (__arm_vuninitializedq_u64 (), __a, __b, __p);
14686 }
14687
/* "_x" (dont-care predicated) variants of vmulltq_poly (polynomial
   multiply, widening: the result element width is double that of the
   inputs).  Predicate-false lanes are undefined; implemented by passing
   an uninitialized vector as the inactive-lanes operand of the merging
   "_m" builtin.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_x_p8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_poly_m_pv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_x_p16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_poly_m_pv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14701
/* "_x" (dont-care predicated) variants of vmulltq_int (widening
   multiply: result element width is double that of the inputs).
   Predicate-false lanes are undefined; the merging "_m" builtin is
   called with an uninitialized vector as its inactive-lanes operand.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_sv4si (__arm_vuninitializedq_s64 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulltq_int_m_uv4si (__arm_vuninitializedq_u64 (), __a, __b, __p);
}
14743
/* "_x" (dont-care predicated) variants of vmulq, in vector-by-vector
   and vector-by-scalar ("_n") forms.  Predicate-false lanes of the
   result are undefined: the merging "_m" builtin receives an
   uninitialized vector as its inactive-lanes (first) argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

/* Vector-by-scalar forms: __b is a single element multiplied into
   every active lane of __a.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14827
/* "_x" (dont-care predicated) variants of vsubq, in vector-by-vector
   and vector-by-scalar ("_n") forms.  Predicate-false lanes of the
   result are undefined: the merging "_m" builtin receives an
   uninitialized vector as its inactive-lanes (first) argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

/* Vector-by-scalar forms: __b is a single element subtracted from
   every active lane of __a.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14911
/* "_x" (dont-care predicated) variants of vcaddq_rot90 (complex add
   with 90-degree rotation, per the MVE VCADD instruction).
   Predicate-false lanes of the result are undefined: the merging "_m"
   builtin receives an uninitialized vector as its inactive-lanes
   (first) argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14953
/* "_x" (dont-care predicated) variants of vcaddq_rot270 (complex add
   with 270-degree rotation, per the MVE VCADD instruction).
   Predicate-false lanes of the result are undefined: the merging "_m"
   builtin receives an uninitialized vector as its inactive-lanes
   (first) argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
14995
/* "_x" (dont-care predicated) variants of vhaddq (halving add), in
   vector-by-scalar ("_n") and vector-by-vector forms.  Predicate-false
   lanes of the result are undefined: the merging "_m" builtin receives
   an uninitialized vector as its inactive-lanes (first) argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhaddq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
15079
/* "_x" (dont-care predicated) variants of vhcaddq_rot90 (halving
   complex add with 90-degree rotation; signed types only).
   Predicate-false lanes of the result are undefined: the merging "_m"
   builtin receives an uninitialized vector as its inactive-lanes
   (first) argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot90_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}
15100
/* "_x" (dont-care predicated) variants of vhcaddq_rot270 (halving
   complex add with 270-degree rotation; signed types only).
   Predicate-false lanes of the result are undefined: the merging "_m"
   builtin receives an uninitialized vector as its inactive-lanes
   (first) argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot270_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot270_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhcaddq_rot270_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}
15121
/* "_x" (dont-care predicated) variants of vhsubq (halving subtract),
   in vector-by-scalar ("_n") and vector-by-vector forms.
   Predicate-false lanes of the result are undefined: the merging "_m"
   builtin receives an uninitialized vector as its inactive-lanes
   (first) argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vhsubq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
15205
/* "_x" (dont-care predicated) variants of vrhaddq (rounding halving
   add).  Predicate-false lanes of the result are undefined: the merging
   "_m" builtin receives an uninitialized vector as its inactive-lanes
   (first) argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrhaddq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
15247
/* "_x" (dont-care predicated) variants of vrmulhq (rounding multiply
   returning high half).  Predicate-false lanes of the result are
   undefined: the merging "_m" builtin receives an uninitialized vector
   as its inactive-lanes (first) argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrmulhq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
15289
/* "_x" (dont-care predicated) variants of vandq (bitwise AND).
   Predicate-false lanes of the result are undefined: the merging "_m"
   builtin receives an uninitialized vector as its inactive-lanes
   (first) argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
15331
/* "_x" (dont-care predicated) variants of vbicq (bitwise clear:
   AND with the complement of __b, per the MVE VBIC instruction).
   Predicate-false lanes of the result are undefined: the merging "_m"
   builtin receives an uninitialized vector as its inactive-lanes
   (first) argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
15373
/* "_x" (dont-care predicated) variants of vbrsrq (bit-reverse shift
   right, per the MVE VBRSR instruction).  Note __b is an int32_t
   scalar for every element type, including the unsigned variants.
   Predicate-false lanes of the result are undefined: the merging "_m"
   builtin receives an uninitialized vector as its inactive-lanes
   (first) argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
15415
/* "_x" (dont-care predicated) variants of veorq (bitwise exclusive
   OR).  Predicate-false lanes of the result are undefined: the merging
   "_m" builtin receives an uninitialized vector as its inactive-lanes
   (first) argument.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
15457
/* vmovlbq _x predicated variants: widen the bottom (even-numbered) elements
   of __a to the double-width result type; false-predicated lanes are
   undefined (uninitialized inactive vector fed to the _m builtin).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovlbq_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __p);
}
15485
/* vmovltq _x predicated variants: widen the top (odd-numbered) elements of
   __a to the double-width result type; false-predicated lanes are
   undefined.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmovltq_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __p);
}
15513
/* vmvnq _x predicated variants (bitwise NOT of __a); false-predicated lanes
   are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
}
15555
/* vmvnq _x immediate variants: broadcast the bitwise NOT of the immediate
   __imm (must be a compile-time constant, checked by the builtin) into each
   active lane; false-predicated lanes are undefined.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_n_s16 (const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_n_s32 (const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_n_sv4si (__arm_vuninitializedq_s32 (), __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_n_u16 (const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x_n_u32 (const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vmvnq_m_n_uv4si (__arm_vuninitializedq_u32 (), __imm, __p);
}
15583
/* vornq _x predicated variants (OR of __a with complement of __b);
   false-predicated lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
15625
/* vorrq _x predicated variants (bitwise OR); false-predicated lanes are
   undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
15667
/* vrev16q _x predicated variants (byte reversal within each 16-bit
   halfword — only 8-bit element types exist); false-predicated lanes are
   undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev16q_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev16q_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev16q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev16q_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
}
15681
/* vrev32q _x predicated variants (element reversal within each 32-bit
   word); false-predicated lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
}
15709
/* vrev64q _x predicated variants (element reversal within each 64-bit
   doubleword); false-predicated lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_s8 (int8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_s16 (int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_s32 (int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_u16 (uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x_u32 (uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
}
15751
/* vrshlq _x predicated variants (rounding shift left by per-element signed
   shift counts in __b — note __b is a signed vector even for the unsigned
   __a variants); false-predicated lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vrshlq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
15793
/* vshllbq _x predicated variants: widening shift left of the bottom
   (even-numbered) elements by immediate __imm, producing the double-width
   result type; false-predicated lanes are undefined.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshllbq_m_n_sv16qi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshllbq_m_n_sv8hi (__arm_vuninitializedq_s32 (), __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshllbq_m_n_uv16qi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshllbq_m_n_uv8hi (__arm_vuninitializedq_u32 (), __a, __imm, __p);
}
15821
/* vshlltq _x predicated variants: widening shift left of the top
   (odd-numbered) elements by immediate __imm; false-predicated lanes are
   undefined.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlltq_m_n_sv16qi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlltq_m_n_sv8hi (__arm_vuninitializedq_s32 (), __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlltq_m_n_uv16qi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlltq_m_n_uv8hi (__arm_vuninitializedq_u32 (), __a, __imm, __p);
}
15849
/* vshlq _x predicated variants (shift left by per-element signed counts in
   __b — __b is signed even for the unsigned __a variants);
   false-predicated lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
}
15891
/* vshlq _x immediate variants (shift left by compile-time constant __imm);
   false-predicated lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshlq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
}
15933
/* vrshrq _x immediate variants (rounding shift right by compile-time
   constant __imm); false-predicated lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vrshrq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
}
15975
/* vshrq _x immediate variants (shift right by compile-time constant
   __imm); false-predicated lanes are undefined.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vshrq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
}
16017
/* vadciq: add with carry, carry-in initialized by the instruction.  After
   the builtin executes, the carry-out is read back from FPSCR.C (bit 29 of
   FPSCR_nzcvqc) and stored through *__carry_out.  The builtin call must
   precede the FPSCR read, so the intermediate __res variable is required.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
{
  int32x4_t __res = __builtin_mve_vadciq_sv4si (__a, __b);
  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
{
  uint32x4_t __res = __builtin_mve_vadciq_uv4si (__a, __b);
  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}

/* Predicated (merging) forms: inactive lanes take their value from
   __inactive.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
{
  int32x4_t __res = __builtin_mve_vadciq_m_sv4si (__inactive, __a, __b, __p);
  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
{
  uint32x4_t __res = __builtin_mve_vadciq_m_uv4si (__inactive, __a, __b, __p);
  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}
16053
/* vadcq: add with carry-in taken from *__carry.  Before the builtin runs,
   FPSCR.C (bit 29, mask 0x20000000) is overwritten with the low bit
   position of *__carry shifted into place; afterwards the carry-out is
   read back into *__carry.  Statement order here is essential — do not
   reorder the FPSCR write, the builtin call, and the FPSCR read.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
{
  __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
  int32x4_t __res = __builtin_mve_vadcq_sv4si (__a, __b);
  *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
{
  __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
  uint32x4_t __res = __builtin_mve_vadcq_uv4si (__a, __b);
  *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}

/* Predicated (merging) forms: inactive lanes take their value from
   __inactive.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
{
  __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
  int32x4_t __res = __builtin_mve_vadcq_m_sv4si (__inactive, __a, __b, __p);
  *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
{
  __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
  uint32x4_t __res = __builtin_mve_vadcq_m_uv4si (__inactive, __a, __b, __p);
  *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}
16093
/* vsbciq: subtract with carry, carry-in initialized by the instruction.
   The borrow/carry-out is read back from FPSCR.C (bit 29) into
   *__carry_out after the builtin executes.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
{
  int32x4_t __res = __builtin_mve_vsbciq_sv4si (__a, __b);
  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
{
  uint32x4_t __res = __builtin_mve_vsbciq_uv4si (__a, __b);
  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}

/* Predicated (merging) forms: inactive lanes take their value from
   __inactive.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
{
  int32x4_t __res = __builtin_mve_vsbciq_m_sv4si (__inactive, __a, __b, __p);
  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
{
  uint32x4_t __res = __builtin_mve_vsbciq_m_uv4si (__inactive, __a, __b, __p);
  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}
16129
/* vsbcq: subtract with carry-in taken from *__carry.  FPSCR.C (bit 29,
   mask 0x20000000) is seeded from *__carry before the builtin runs and the
   carry-out is read back afterwards; as with vadcq, the three statements
   must stay in this exact order.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
{
  __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
  int32x4_t __res = __builtin_mve_vsbcq_sv4si (__a, __b);
  *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
{
  __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
  uint32x4_t __res = __builtin_mve_vsbcq_uv4si (__a, __b);
  *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}

/* Predicated (merging) forms: inactive lanes take their value from
   __inactive.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
{
  __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
  int32x4_t __res = __builtin_mve_vsbcq_m_sv4si (__inactive, __a, __b, __p);
  *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
{
  __builtin_arm_set_fpscr_nzcvqc((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | (*__carry << 29));
  uint32x4_t __res = __builtin_mve_vsbcq_m_uv4si (__inactive, __a, __b, __p);
  *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
  return __res;
}
16169
16170 __extension__ extern __inline void
16171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16172 __arm_vst1q_p_u8 (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
16173 {
16174 return vstrbq_p_u8 (__addr, __value, __p);
16175 }
16176
16177 __extension__ extern __inline void
16178 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16179 __arm_vst1q_p_s8 (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
16180 {
16181 return vstrbq_p_s8 (__addr, __value, __p);
16182 }
16183
/* vst2q: store a pair of vectors with 2-way interleaving (VST2).  The
   vector tuple is reinterpreted as the builtin's opaque OI mode through a
   union, since there is no direct conversion between the two types.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q_s8 (int8_t * __addr, int8x16x2_t __value)
{
  union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst2qv16qi ((__builtin_neon_qi *) __addr, __rv.__o);
}

/* Unsigned variant; shares the v16qi builtin with the signed form.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q_u8 (uint8_t * __addr, uint8x16x2_t __value)
{
  union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst2qv16qi ((__builtin_neon_qi *) __addr, __rv.__o);
}
16201
16202 __extension__ extern __inline uint8x16_t
16203 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16204 __arm_vld1q_z_u8 (uint8_t const *__base, mve_pred16_t __p)
16205 {
16206 return vldrbq_z_u8 ( __base, __p);
16207 }
16208
16209 __extension__ extern __inline int8x16_t
16210 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16211 __arm_vld1q_z_s8 (int8_t const *__base, mve_pred16_t __p)
16212 {
16213 return vldrbq_z_s8 ( __base, __p);
16214 }
16215
/* vld2q/vld4q: de-interleaving loads of two- or four-vector tuples
   (VLD2/VLD4).  The builtins return opaque OI/XI modes which are
   converted back to the tuple types through a union.  */
__extension__ extern __inline int8x16x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q_s8 (int8_t const * __addr)
{
  union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__o = __builtin_mve_vld2qv16qi ((__builtin_neon_qi *) __addr);
  return __rv.__i;
}

/* Unsigned variant of __arm_vld2q_s8.  */
__extension__ extern __inline uint8x16x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q_u8 (uint8_t const * __addr)
{
  union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__o = __builtin_mve_vld2qv16qi ((__builtin_neon_qi *) __addr);
  return __rv.__i;
}

/* Four-vector (XI mode) variant.  */
__extension__ extern __inline int8x16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q_s8 (int8_t const * __addr)
{
  union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__o = __builtin_mve_vld4qv16qi ((__builtin_neon_qi *) __addr);
  return __rv.__i;
}

/* Unsigned variant of __arm_vld4q_s8.  */
__extension__ extern __inline uint8x16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q_u8 (uint8_t const * __addr)
{
  union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__o = __builtin_mve_vld4qv16qi ((__builtin_neon_qi *) __addr);
  return __rv.__i;
}
16251
16252 __extension__ extern __inline void
16253 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16254 __arm_vst1q_p_u16 (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
16255 {
16256 return vstrhq_p_u16 (__addr, __value, __p);
16257 }
16258
16259 __extension__ extern __inline void
16260 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16261 __arm_vst1q_p_s16 (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
16262 {
16263 return vstrhq_p_s16 (__addr, __value, __p);
16264 }
16265
/* vst2q (16-bit): store a pair of vectors with 2-way interleaving (VST2);
   the tuple is punned to the builtin's opaque OI mode via a union.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q_s16 (int16_t * __addr, int16x8x2_t __value)
{
  union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst2qv8hi ((__builtin_neon_hi *) __addr, __rv.__o);
}

/* Unsigned variant; shares the v8hi builtin with the signed form.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q_u16 (uint16_t * __addr, uint16x8x2_t __value)
{
  union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst2qv8hi ((__builtin_neon_hi *) __addr, __rv.__o);
}
16283
16284 __extension__ extern __inline uint16x8_t
16285 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16286 __arm_vld1q_z_u16 (uint16_t const *__base, mve_pred16_t __p)
16287 {
16288 return vldrhq_z_u16 ( __base, __p);
16289 }
16290
16291 __extension__ extern __inline int16x8_t
16292 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16293 __arm_vld1q_z_s16 (int16_t const *__base, mve_pred16_t __p)
16294 {
16295 return vldrhq_z_s16 ( __base, __p);
16296 }
16297
/* vld2q/vld4q (16-bit): de-interleaving loads of two- or four-vector
   tuples (VLD2/VLD4), converting the builtins' opaque OI/XI modes back to
   tuple types through a union.  */
__extension__ extern __inline int16x8x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q_s16 (int16_t const * __addr)
{
  union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__o = __builtin_mve_vld2qv8hi ((__builtin_neon_hi *) __addr);
  return __rv.__i;
}

/* Unsigned variant of __arm_vld2q_s16.  */
__extension__ extern __inline uint16x8x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q_u16 (uint16_t const * __addr)
{
  union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__o = __builtin_mve_vld2qv8hi ((__builtin_neon_hi *) __addr);
  return __rv.__i;
}

/* Four-vector (XI mode) variant.  */
__extension__ extern __inline int16x8x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q_s16 (int16_t const * __addr)
{
  union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__o = __builtin_mve_vld4qv8hi ((__builtin_neon_hi *) __addr);
  return __rv.__i;
}

/* Unsigned variant of __arm_vld4q_s16.  */
__extension__ extern __inline uint16x8x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q_u16 (uint16_t const * __addr)
{
  union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__o = __builtin_mve_vld4qv8hi ((__builtin_neon_hi *) __addr);
  return __rv.__i;
}
16333
16334 __extension__ extern __inline void
16335 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16336 __arm_vst1q_p_u32 (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
16337 {
16338 return vstrwq_p_u32 (__addr, __value, __p);
16339 }
16340
16341 __extension__ extern __inline void
16342 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16343 __arm_vst1q_p_s32 (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
16344 {
16345 return vstrwq_p_s32 (__addr, __value, __p);
16346 }
16347
/* vst2q (32-bit): store a pair of vectors with 2-way interleaving (VST2);
   the tuple is punned to the builtin's opaque OI mode via a union.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q_s32 (int32_t * __addr, int32x4x2_t __value)
{
  union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst2qv4si ((__builtin_neon_si *) __addr, __rv.__o);
}

/* Unsigned variant; shares the v4si builtin with the signed form.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q_u32 (uint32_t * __addr, uint32x4x2_t __value)
{
  union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst2qv4si ((__builtin_neon_si *) __addr, __rv.__o);
}
16365
16366 __extension__ extern __inline uint32x4_t
16367 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16368 __arm_vld1q_z_u32 (uint32_t const *__base, mve_pred16_t __p)
16369 {
16370 return vldrwq_z_u32 ( __base, __p);
16371 }
16372
16373 __extension__ extern __inline int32x4_t
16374 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16375 __arm_vld1q_z_s32 (int32_t const *__base, mve_pred16_t __p)
16376 {
16377 return vldrwq_z_s32 ( __base, __p);
16378 }
16379
/* vld2q/vld4q (32-bit): de-interleaving loads of two- or four-vector
   tuples (VLD2/VLD4), converting the builtins' opaque OI/XI modes back to
   tuple types through a union.  */
__extension__ extern __inline int32x4x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q_s32 (int32_t const * __addr)
{
  union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__o = __builtin_mve_vld2qv4si ((__builtin_neon_si *) __addr);
  return __rv.__i;
}

/* Unsigned variant of __arm_vld2q_s32.  */
__extension__ extern __inline uint32x4x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q_u32 (uint32_t const * __addr)
{
  union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
  __rv.__o = __builtin_mve_vld2qv4si ((__builtin_neon_si *) __addr);
  return __rv.__i;
}

/* Four-vector (XI mode) variant.  */
__extension__ extern __inline int32x4x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q_s32 (int32_t const * __addr)
{
  union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__o = __builtin_mve_vld4qv4si ((__builtin_neon_si *) __addr);
  return __rv.__i;
}

/* Unsigned variant of __arm_vld4q_s32.  */
__extension__ extern __inline uint32x4x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q_u32 (uint32_t const * __addr)
{
  union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__o = __builtin_mve_vld4qv4si ((__builtin_neon_si *) __addr);
  return __rv.__i;
}
16415
/* vsetq_lane: insert the scalar __a into lane __idx of vector __b and
   return the updated vector.  __ARM_CHECK_LANEQ (a macro defined earlier
   in this file) diagnoses out-of-range constant lane indices; __ARM_LANEQ
   maps the index to the element position.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane_s16 (int16_t __a, int16x8_t __b, const int __idx)
{
  __ARM_CHECK_LANEQ (__b, __idx);
  __b[__ARM_LANEQ(__b,__idx)] = __a;
  return __b;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane_s32 (int32_t __a, int32x4_t __b, const int __idx)
{
  __ARM_CHECK_LANEQ (__b, __idx);
  __b[__ARM_LANEQ(__b,__idx)] = __a;
  return __b;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane_s8 (int8_t __a, int8x16_t __b, const int __idx)
{
  __ARM_CHECK_LANEQ (__b, __idx);
  __b[__ARM_LANEQ(__b,__idx)] = __a;
  return __b;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane_s64 (int64_t __a, int64x2_t __b, const int __idx)
{
  __ARM_CHECK_LANEQ (__b, __idx);
  __b[__ARM_LANEQ(__b,__idx)] = __a;
  return __b;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane_u8 (uint8_t __a, uint8x16_t __b, const int __idx)
{
  __ARM_CHECK_LANEQ (__b, __idx);
  __b[__ARM_LANEQ(__b,__idx)] = __a;
  return __b;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane_u16 (uint16_t __a, uint16x8_t __b, const int __idx)
{
  __ARM_CHECK_LANEQ (__b, __idx);
  __b[__ARM_LANEQ(__b,__idx)] = __a;
  return __b;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane_u32 (uint32_t __a, uint32x4_t __b, const int __idx)
{
  __ARM_CHECK_LANEQ (__b, __idx);
  __b[__ARM_LANEQ(__b,__idx)] = __a;
  return __b;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane_u64 (uint64_t __a, uint64x2_t __b, const int __idx)
{
  __ARM_CHECK_LANEQ (__b, __idx);
  __b[__ARM_LANEQ(__b,__idx)] = __a;
  return __b;
}
16487
/* vgetq_lane: extract the scalar in lane __idx of vector __a.
   __ARM_CHECK_LANEQ diagnoses out-of-range constant lane indices.  */
__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane_s16 (int16x8_t __a, const int __idx)
{
  __ARM_CHECK_LANEQ (__a, __idx);
  return __a[__ARM_LANEQ(__a,__idx)];
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane_s32 (int32x4_t __a, const int __idx)
{
  __ARM_CHECK_LANEQ (__a, __idx);
  return __a[__ARM_LANEQ(__a,__idx)];
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane_s8 (int8x16_t __a, const int __idx)
{
  __ARM_CHECK_LANEQ (__a, __idx);
  return __a[__ARM_LANEQ(__a,__idx)];
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane_s64 (int64x2_t __a, const int __idx)
{
  __ARM_CHECK_LANEQ (__a, __idx);
  return __a[__ARM_LANEQ(__a,__idx)];
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane_u8 (uint8x16_t __a, const int __idx)
{
  __ARM_CHECK_LANEQ (__a, __idx);
  return __a[__ARM_LANEQ(__a,__idx)];
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane_u16 (uint16x8_t __a, const int __idx)
{
  __ARM_CHECK_LANEQ (__a, __idx);
  return __a[__ARM_LANEQ(__a,__idx)];
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane_u32 (uint32x4_t __a, const int __idx)
{
  __ARM_CHECK_LANEQ (__a, __idx);
  return __a[__ARM_LANEQ(__a,__idx)];
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane_u64 (uint64x2_t __a, const int __idx)
{
  __ARM_CHECK_LANEQ (__a, __idx);
  return __a[__ARM_LANEQ(__a,__idx)];
}
16551
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_lsll (uint64_t value, int32_t shift)
{
  /* Long shift left: shift the 64-bit VALUE left by SHIFT bits.  */
  uint64_t __shifted = value << shift;
  return __shifted;
}
16558
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_asrl (int64_t value, int32_t shift)
{
  /* Arithmetic shift right of the signed 64-bit VALUE by SHIFT bits.  */
  int64_t __shifted = value >> shift;
  return __shifted;
}
16565
/* Scalar shift intrinsics mapping to the MVE long/rounding/saturating
   shift instructions (UQRSHLL, SQRSHRL, UQSHLL, URSHRL, SRSHRL, SQSHLL,
   UQRSHL, SQRSHR, UQSHL, URSHR, SQSHL, SRSHR).  The _sat48 variants use
   the 48-bit saturation form of the instruction; variants taking
   "const int shift" require an immediate.  See the Armv8.1-M manual for
   exact rounding/saturation semantics.  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_uqrshll (uint64_t value, int32_t shift)
{
  return __builtin_mve_uqrshll_sat64_di (value, shift);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_uqrshll_sat48 (uint64_t value, int32_t shift)
{
  return __builtin_mve_uqrshll_sat48_di (value, shift);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_sqrshrl (int64_t value, int32_t shift)
{
  return __builtin_mve_sqrshrl_sat64_di (value, shift);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_sqrshrl_sat48 (int64_t value, int32_t shift)
{
  return __builtin_mve_sqrshrl_sat48_di (value, shift);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_uqshll (uint64_t value, const int shift)
{
  return __builtin_mve_uqshll_di (value, shift);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_urshrl (uint64_t value, const int shift)
{
  return __builtin_mve_urshrl_di (value, shift);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_srshrl (int64_t value, const int shift)
{
  return __builtin_mve_srshrl_di (value, shift);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_sqshll (int64_t value, const int shift)
{
  return __builtin_mve_sqshll_di (value, shift);
}

/* 32-bit (SImode) forms of the scalar shifts.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_uqrshl (uint32_t value, int32_t shift)
{
  return __builtin_mve_uqrshl_si (value, shift);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_sqrshr (int32_t value, int32_t shift)
{
  return __builtin_mve_sqrshr_si (value, shift);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_uqshl (uint32_t value, const int shift)
{
  return __builtin_mve_uqshl_si (value, shift);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_urshr (uint32_t value, const int shift)
{
  return __builtin_mve_urshr_si (value, shift);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_sqshl (int32_t value, const int shift)
{
  return __builtin_mve_sqshl_si (value, shift);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_srshr (int32_t value, const int shift)
{
  return __builtin_mve_srshr_si (value, shift);
}
16663
/* vshlcq_m: predicated whole-vector shift left with carry (VSHLC).  Each
   variant makes two builtin calls with identical arguments: one yields the
   shifted vector, the other the updated carry word written back to *__b.
   NOTE(review): per the ACLE, *__b supplies the shifted-in bits and
   receives the bits shifted out — confirm against the spec.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m_s8 (int8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  int8x16_t __res = __builtin_mve_vshlcq_m_vec_sv16qi (__a, *__b, __imm, __p);
  *__b = __builtin_mve_vshlcq_m_carry_sv16qi (__a, *__b, __imm, __p);
  return __res;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m_u8 (uint8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  uint8x16_t __res = __builtin_mve_vshlcq_m_vec_uv16qi (__a, *__b, __imm, __p);
  *__b = __builtin_mve_vshlcq_m_carry_uv16qi (__a, *__b, __imm, __p);
  return __res;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m_s16 (int16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  int16x8_t __res = __builtin_mve_vshlcq_m_vec_sv8hi (__a, *__b, __imm, __p);
  *__b = __builtin_mve_vshlcq_m_carry_sv8hi (__a, *__b, __imm, __p);
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m_u16 (uint16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  uint16x8_t __res = __builtin_mve_vshlcq_m_vec_uv8hi (__a, *__b, __imm, __p);
  *__b = __builtin_mve_vshlcq_m_carry_uv8hi (__a, *__b, __imm, __p);
  return __res;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m_s32 (int32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  int32x4_t __res = __builtin_mve_vshlcq_m_vec_sv4si (__a, *__b, __imm, __p);
  *__b = __builtin_mve_vshlcq_m_carry_sv4si (__a, *__b, __imm, __p);
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m_u32 (uint32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  uint32x4_t __res = __builtin_mve_vshlcq_m_vec_uv4si (__a, *__b, __imm, __p);
  *__b = __builtin_mve_vshlcq_m_carry_uv4si (__a, *__b, __imm, __p);
  return __res;
}
16717
16718 #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
16719
/* vst4q (float): store four vectors with 4-way interleaving (VST4); the
   tuple is punned to the builtin's opaque XI mode via a union.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_f16 (float16_t * __addr, float16x8x4_t __value)
{
  union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv8hf (__addr, __rv.__o);
}

/* Single-precision variant of __arm_vst4q_f16.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q_f32 (float32_t * __addr, float32x4x4_t __value)
{
  union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
  __rv.__i = __value;
  __builtin_mve_vst4qv4sf (__addr, __rv.__o);
}
16737
/* Vector round-to-integral (result stays in floating-point format), per
   the ACLE naming:
     vrndxq - current rounding mode, raising Inexact (VRINTX);
     vrndq  - toward zero (VRINTZ);
     vrndpq - toward +infinity (VRINTP);
     vrndnq - to nearest, ties to even (VRINTN);
     vrndmq - toward -infinity (VRINTM);
     vrndaq - to nearest, ties away from zero (VRINTA).
   vrev64q reverses element order within each 64-bit doubleword.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_f16 (float16x8_t __a)
{
  return __builtin_mve_vrndxq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_f32 (float32x4_t __a)
{
  return __builtin_mve_vrndxq_fv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_f16 (float16x8_t __a)
{
  return __builtin_mve_vrndq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_f32 (float32x4_t __a)
{
  return __builtin_mve_vrndq_fv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_f16 (float16x8_t __a)
{
  return __builtin_mve_vrndpq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_f32 (float32x4_t __a)
{
  return __builtin_mve_vrndpq_fv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_f16 (float16x8_t __a)
{
  return __builtin_mve_vrndnq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_f32 (float32x4_t __a)
{
  return __builtin_mve_vrndnq_fv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_f16 (float16x8_t __a)
{
  return __builtin_mve_vrndmq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_f32 (float32x4_t __a)
{
  return __builtin_mve_vrndmq_fv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_f16 (float16x8_t __a)
{
  return __builtin_mve_vrndaq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_f32 (float32x4_t __a)
{
  return __builtin_mve_vrndaq_fv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_f16 (float16x8_t __a)
{
  return __builtin_mve_vrev64q_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_f32 (float32x4_t __a)
{
  return __builtin_mve_vrev64q_fv4sf (__a);
}
16835
/* Element-wise negation (vnegq), scalar broadcast (vdupq_n) and absolute
   value (vabsq) for f16/f32 vectors.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_f16 (float16x8_t __a)
{
  return __builtin_mve_vnegq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_f32 (float32x4_t __a)
{
  return __builtin_mve_vnegq_fv4sf (__a);
}

/* Broadcast the scalar __a to every lane.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_f16 (float16_t __a)
{
  return __builtin_mve_vdupq_n_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n_f32 (float32_t __a)
{
  return __builtin_mve_vdupq_n_fv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_f16 (float16x8_t __a)
{
  return __builtin_mve_vabsq_fv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_f32 (float32x4_t __a)
{
  return __builtin_mve_vabsq_fv4sf (__a);
}
16877
/* vrev32q_f16: reverse the two f16 elements within each 32-bit word.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_f16 (float16x8_t __a)
{
  return __builtin_mve_vrev32q_fv8hf (__a);
}

/* vcvttq/vcvtbq_f32_f16: widen the top/bottom f16 element of each 32-bit
   pair to f32 (VCVTT/VCVTB).  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_f32_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvttq_f32_f16v4sf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_f32_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtbq_f32_f16v4sf (__a);
}
16898
/* vcvtq: lane-wise conversions between floating-point and integer vectors
   of the same element width (VCVT), using the default rounding
   behaviour.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_f16_s16 (int16x8_t __a)
{
  return __builtin_mve_vcvtq_to_f_sv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_f32_s32 (int32x4_t __a)
{
  return __builtin_mve_vcvtq_to_f_sv4sf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_f16_u16 (uint16x8_t __a)
{
  return __builtin_mve_vcvtq_to_f_uv8hf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_f32_u32 (uint32x4_t __a)
{
  return __builtin_mve_vcvtq_to_f_uv4sf (__a);
}

/* Float-to-integer direction.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtq_from_f_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtq_from_f_sv4si (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtq_from_f_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_u32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtq_from_f_uv4si (__a);
}
16954
/* Float-to-integer conversions with an explicit rounding mode, per the
   ACLE naming:
     vcvtaq - to nearest, ties away from zero (VCVTA);
     vcvtnq - to nearest, ties to even (VCVTN);
     vcvtpq - toward +infinity (VCVTP);
     vcvtmq - toward -infinity (VCVTM).
   Unsigned destinations first, then the signed counterparts.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtpq_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_u32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtpq_uv4si (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtnq_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_u32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtnq_uv4si (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtmq_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_u32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtmq_uv4si (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_u16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtaq_uv8hi (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_u32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtaq_uv4si (__a);
}

/* Signed destinations.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtaq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtaq_sv4si (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtnq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtnq_sv4si (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtpq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtpq_sv4si (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_s16_f16 (float16x8_t __a)
{
  return __builtin_mve_vcvtmq_sv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_s32_f32 (float32x4_t __a)
{
  return __builtin_mve_vcvtmq_sv4si (__a);
}
17066
/* Vector-scalar subtract (vsubq_n) and bit-reverse-shift-right
   (vbrsrq_n) for float vectors; both forward to the matching builtin.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vsubq_n_fv8hf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vsubq_n_fv4sf (__a, __b);
}

/* VBRSR: per the Arm MVE spec, operates on the bit-reversed value of the
   bottom bits of the scalar __b -- the builtin implements the details.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_f16 (float16x8_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_fv8hf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_n_f32 (float32x4_t __a, int32_t __b)
{
  return __builtin_mve_vbrsrq_n_fv4sf (__a, __b);
}
17094
/* Fixed-point integer -> float conversions with a compile-time fraction-bit
   count __imm6 (range restrictions are enforced by the builtin expander).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_f16_s16 (int16x8_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_to_f_sv8hf (__a, __imm6);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_f32_s32 (int32x4_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_to_f_sv4sf (__a, __imm6);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_f16_u16 (uint16x8_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_to_f_uv8hf (__a, __imm6);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_f32_u32 (uint32x4_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_to_f_uv4sf (__a, __imm6);
}
17122
/* Create a 128-bit float vector from two 64-bit scalars: __a supplies the
   low half and __b the high half (see the Arm MVE vcreateq definition).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_f16 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_fv8hf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcreateq_f32 (uint64_t __a, uint64_t __b)
{
  return __builtin_mve_vcreateq_fv4sf (__a, __b);
}
17136
/* Float -> fixed-point integer conversions with a compile-time fraction-bit
   count __imm6 (range restrictions are enforced by the builtin expander).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_s16_f16 (float16x8_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_from_f_sv8hi (__a, __imm6);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_s32_f32 (float32x4_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_from_f_sv4si (__a, __imm6);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_u16_f16 (float16x8_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_from_f_uv8hi (__a, __imm6);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n_u32_f32 (float32x4_t __a, const int __imm6)
{
  return __builtin_mve_vcvtq_n_from_f_uv4si (__a, __imm6);
}
17164
/* float16x8_t comparisons.  Each returns a 16-bit lane predicate
   (mve_pred16_t); the _n variants compare every lane against the scalar
   __b, the plain variants compare lane-wise against vector __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpneq_n_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpneq_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpltq_n_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpltq_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpleq_n_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpleq_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpgtq_n_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpgtq_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpgeq_n_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpgeq_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vcmpeqq_n_fv8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmpeqq_fv8hf (__a, __b);
}
17248
/* float16x8_t binary operations: arithmetic, bitwise logic, NaN-propagating
   min/max (vminnm*/vmaxnm*), across-vector reductions (the *vq variants
   fold into the scalar __a), and complex arithmetic with lane-pair
   rotations (vcmulq/vcaddq _rot90/_rot180/_rot270, per the Arm MVE spec).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vsubq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vorrq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vornq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vmulq_n_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vmulq_fv8hf (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq_f16 (float16_t __a, float16x8_t __b)
{
  return __builtin_mve_vminnmvq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vminnmq_fv8hf (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq_f16 (float16_t __a, float16x8_t __b)
{
  return __builtin_mve_vminnmavq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vminnmaq_fv8hf (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq_f16 (float16_t __a, float16x8_t __b)
{
  return __builtin_mve_vmaxnmvq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vmaxnmq_fv8hf (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq_f16 (float16_t __a, float16x8_t __b)
{
  return __builtin_mve_vmaxnmavq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vmaxnmaq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_veorq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmulq_rot90v8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmulq_rot270v8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmulq_rot180v8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcmulqv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcaddq_rot90v8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vcaddq_rot270v8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vbicq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vandq_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_f16 (float16x8_t __a, float16_t __b)
{
  return __builtin_mve_vaddq_n_fv8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __builtin_mve_vabdq_fv8hf (__a, __b);
}
17416
/* float32x4_t comparisons -- same contract as the f16 comparison set:
   _n variants compare against the scalar __b, plain variants lane-wise.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpneq_n_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpneq_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpltq_n_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpltq_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpleq_n_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpleq_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpgtq_n_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpgtq_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpgeq_n_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpgeq_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vcmpeqq_n_fv4sf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmpeqq_fv4sf (__a, __b);
}
17500
/* float32x4_t binary operations -- same families as the f16 set above:
   arithmetic, bitwise logic, NaN-propagating min/max, across-vector
   reductions folding into the scalar __a, and complex-rotation ops.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vsubq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vorrq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vornq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vmulq_n_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vmulq_fv4sf (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq_f32 (float32_t __a, float32x4_t __b)
{
  return __builtin_mve_vminnmvq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vminnmq_fv4sf (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq_f32 (float32_t __a, float32x4_t __b)
{
  return __builtin_mve_vminnmavq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vminnmaq_fv4sf (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq_f32 (float32_t __a, float32x4_t __b)
{
  return __builtin_mve_vmaxnmvq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vmaxnmq_fv4sf (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq_f32 (float32_t __a, float32x4_t __b)
{
  return __builtin_mve_vmaxnmavq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vmaxnmaq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_veorq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmulq_rot90v4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmulq_rot270v4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmulq_rot180v4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcmulqv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcaddq_rot90v4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vcaddq_rot270v4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vbicq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vandq_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_n_f32 (float32x4_t __a, float32_t __b)
{
  return __builtin_mve_vaddq_n_fv4sf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_mve_vabdq_fv4sf (__a, __b);
}
17668
/* Narrowing f32 -> f16 conversions: per the Arm MVE spec, vcvttq writes the
   converted values of __b into the top half of each f16 lane pair of __a,
   vcvtbq into the bottom half; the other half of __a passes through.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_f16_f32 (float16x8_t __a, float32x4_t __b)
{
  return __builtin_mve_vcvttq_f16_f32v8hf (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_f16_f32 (float16x8_t __a, float32x4_t __b)
{
  return __builtin_mve_vcvtbq_f16_f32v8hf (__a, __b);
}
17682
/* Predicated equality comparisons: only lanes enabled by __p take part
   (the builtin implements the merge semantics).  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_fv4sf (__a, __b, __p);
}
17696
/* Predicated float -> integer conversions (round to nearest, ties away,
   per the vcvta spec); lanes disabled by __p keep their __inactive value.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_sv4si (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_uv4si (__inactive, __a, __p);
}
17724
/* Predicated integer -> float conversions; lanes disabled by __p keep
   their __inactive value.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f16_s16 (float16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f16_u16 (float16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f32_s32 (float32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f32_u32 (float32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv4sf (__inactive, __a, __p);
}
17752
17753
/* Predicated top/bottom-half conversions between f16 and f32 (see the
   unpredicated vcvttq/vcvtbq wrappers above for the top/bottom layout).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_m_f16_f32 (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcvtbq_m_f16_f32v8hf (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_m_f32_f16 (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtbq_m_f32_f16v4sf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_m_f16_f32 (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcvttq_m_f16_f32v8hf (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_m_f32_f16 (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvttq_m_f32_f16v4sf (__inactive, __a, __p);
}
17781
/* Predicated reversal of f16 elements within each 32-bit container;
   disabled lanes keep their __inactive value.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev32q_m_fv8hf (__inactive, __a, __p);
}
17788
/* Complex multiply-accumulate (VCMLA): accumulates __b * __c into __a with
   the rotation (0/90/180/270 degrees) given in the intrinsic name, per the
   Arm MVE specification.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaqv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot180v8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot270v8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vcmlaq_rot90v8hf (__a, __b, __c);
}
17816
/* Fused multiply-add/subtract family; the _n variants take a scalar
   third operand (semantics defined by the corresponding builtin).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vfmaq_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
{
  return __builtin_mve_vfmaq_n_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
{
  return __builtin_mve_vfmasq_n_fv8hf (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_mve_vfmsq_fv8hf (__a, __b, __c);
}
17844
/* Predicated absolute value; disabled lanes keep their __inactive value.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_fv8hf (__inactive, __a, __p);
}
17851
/* Predicated f16 -> s16 conversions with the rounding mode encoded in the
   intrinsic name (m/n/p as documented for the unpredicated set above;
   plain vcvtq uses the rounding selected by the builtin).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_sv8hi (__inactive, __a, __p);
}
17879
/* Predicated scalar broadcast; disabled lanes keep their __inactive value.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_f16 (float16x8_t __inactive, float16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_fv8hf (__inactive, __a, __p);
}
17886
17887 __extension__ extern __inline float16x8_t
17888 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17889 __arm_vmaxnmaq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
17890 {
17891 return __builtin_mve_vmaxnmaq_m_fv8hf (__a, __b, __p);
17892 }
17893
17894 __extension__ extern __inline float16_t
17895 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17896 __arm_vmaxnmavq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
17897 {
17898 return __builtin_mve_vmaxnmavq_p_fv8hf (__a, __b, __p);
17899 }
17900
17901 __extension__ extern __inline float16_t
17902 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17903 __arm_vmaxnmvq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
17904 {
17905 return __builtin_mve_vmaxnmvq_p_fv8hf (__a, __b, __p);
17906 }
17907
17908 __extension__ extern __inline float16x8_t
17909 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17910 __arm_vminnmaq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
17911 {
17912 return __builtin_mve_vminnmaq_m_fv8hf (__a, __b, __p);
17913 }
17914
17915 __extension__ extern __inline float16_t
17916 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17917 __arm_vminnmavq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
17918 {
17919 return __builtin_mve_vminnmavq_p_fv8hf (__a, __b, __p);
17920 }
17921
17922 __extension__ extern __inline float16_t
17923 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17924 __arm_vminnmvq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
17925 {
17926 return __builtin_mve_vminnmvq_p_fv8hf (__a, __b, __p);
17927 }
17928
/* Predicated VNEG: per-lane negation; inactive lanes keep
   __inactive.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_fv8hf (__inactive, __a, __p);
}

/* VPSEL: per-lane select — lanes where __p is set come from __a,
   the rest from __b.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_fv8hf (__a, __b, __p);
}

/* Predicated VREV64: reverse the element order within each 64-bit
   doubleword of __a.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_fv8hf (__inactive, __a, __p);
}

/* Predicated VRINTA: round to integral, ties away from zero.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndaq_m_fv8hf (__inactive, __a, __p);
}

/* Predicated VRINTM: round to integral, toward minus infinity.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndmq_m_fv8hf (__inactive, __a, __p);
}

/* Predicated VRINTN: round to integral, to nearest with ties to
   even.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndnq_m_fv8hf (__inactive, __a, __p);
}

/* Predicated VRINTP: round to integral, toward plus infinity.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndpq_m_fv8hf (__inactive, __a, __p);
}

/* Predicated VRINTZ: round to integral, toward zero.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndq_m_fv8hf (__inactive, __a, __p);
}

/* Predicated VRINTX: round to integral using the current rounding
   mode.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndxq_m_fv8hf (__inactive, __a, __p);
}
17991
/* Predicated VCMP ==: compare each lane of __a against broadcast
   scalar __b; result predicate bits are set only for active lanes
   where the comparison holds.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_fv8hf (__a, __b, __p);
}

/* Predicated VCMP >= of two f16 vectors.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_fv8hf (__a, __b, __p);
}

/* Predicated VCMP >= against broadcast scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_n_fv8hf (__a, __b, __p);
}

/* Predicated VCMP > of two f16 vectors.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_fv8hf (__a, __b, __p);
}

/* Predicated VCMP > against broadcast scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_fv8hf (__a, __b, __p);
}

/* Predicated VCMP <= of two f16 vectors.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_fv8hf (__a, __b, __p);
}

/* Predicated VCMP <= against broadcast scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_fv8hf (__a, __b, __p);
}

/* Predicated VCMP < of two f16 vectors.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_fv8hf (__a, __b, __p);
}

/* Predicated VCMP < against broadcast scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_n_fv8hf (__a, __b, __p);
}

/* Predicated VCMP != of two f16 vectors.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_fv8hf (__a, __b, __p);
}

/* Predicated VCMP != against broadcast scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_fv8hf (__a, __b, __p);
}
18068
/* Predicated f16 -> u16 conversion, rounding toward minus infinity
   (VCVTM); predicate-clear lanes keep __inactive.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_uv8hi (__inactive, __a, __p);
}

/* Predicated f16 -> u16 conversion, rounding to nearest with ties to
   even (VCVTN).  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_uv8hi (__inactive, __a, __p);
}

/* Predicated f16 -> u16 conversion, rounding toward plus infinity
   (VCVTP).  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_uv8hi (__inactive, __a, __p);
}

/* Predicated f16 -> u16 conversion, rounding toward zero (VCVT).  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_uv8hi (__inactive, __a, __p);
}
18096
/* VCMLA: complex multiply-accumulate of f32 vectors, rotation 0.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaqv4sf (__a, __b, __c);
}

/* VCMLA with the second operand rotated by 180 degrees.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaq_rot180v4sf (__a, __b, __c);
}

/* VCMLA with the second operand rotated by 270 degrees.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaq_rot270v4sf (__a, __b, __c);
}

/* VCMLA with the second operand rotated by 90 degrees.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vcmlaq_rot90v4sf (__a, __b, __c);
}

/* VFMA: fused multiply-accumulate, __a + __b * __c per lane.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vfmaq_fv4sf (__a, __b, __c);
}

/* VFMA with scalar __c broadcast as the multiplier.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
{
  return __builtin_mve_vfmaq_n_fv4sf (__a, __b, __c);
}

/* VFMAS: multiply the two vectors and add broadcast scalar __c.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
{
  return __builtin_mve_vfmasq_n_fv4sf (__a, __b, __c);
}

/* VFMS: fused multiply-subtract of f32 vectors.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_mve_vfmsq_fv4sf (__a, __b, __c);
}
18152
/* Predicated VABS: per-lane absolute value of __a; predicate-clear
   lanes keep __inactive.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_fv4sf (__inactive, __a, __p);
}

/* Predicated f32 -> s32 conversion, rounding toward minus infinity
   (VCVTM).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_sv4si (__inactive, __a, __p);
}

/* Predicated f32 -> s32 conversion, rounding to nearest with ties to
   even (VCVTN).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_sv4si (__inactive, __a, __p);
}

/* Predicated f32 -> s32 conversion, rounding toward plus infinity
   (VCVTP).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_sv4si (__inactive, __a, __p);
}

/* Predicated f32 -> s32 conversion, rounding toward zero (VCVT).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_sv4si (__inactive, __a, __p);
}

/* Predicated VDUP: broadcast scalar __a into active lanes;
   predicate-clear lanes keep __inactive.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m_n_f32 (float32x4_t __inactive, float32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_fv4sf (__inactive, __a, __p);
}
18194
/* Predicated VMAXNMA: per-lane maximum of absolute values, merged
   into __a's inactive lanes.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmaq_m_fv4sf (__a, __b, __p);
}

/* Predicated VMAXNMAV: absolute-maximum reduction across the active
   lanes of __b, seeded with scalar __a.  */
__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmavq_p_fv4sf (__a, __b, __p);
}

/* Predicated VMAXNMV: maximum reduction across the active lanes of
   __b, seeded with scalar __a.  */
__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmvq_p_fv4sf (__a, __b, __p);
}

/* Predicated VMINNMA: per-lane minimum of absolute values, merged
   into __a's inactive lanes.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmaq_m_fv4sf (__a, __b, __p);
}

/* Predicated VMINNMAV: absolute-minimum reduction across the active
   lanes of __b, seeded with scalar __a.  */
__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmavq_p_fv4sf (__a, __b, __p);
}

/* Predicated VMINNMV: minimum reduction across the active lanes of
   __b, seeded with scalar __a.  */
__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmvq_p_fv4sf (__a, __b, __p);
}

/* Predicated VNEG: per-lane negation; inactive lanes keep
   __inactive.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_fv4sf (__inactive, __a, __p);
}

/* VPSEL: per-lane select — lanes where __p is set come from __a,
   the rest from __b.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vpselq_fv4sf (__a, __b, __p);
}

/* Predicated VREV64: reverse the element order within each 64-bit
   doubleword of __a.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrev64q_m_fv4sf (__inactive, __a, __p);
}
18257
/* Predicated VRINTA: round to integral, ties away from zero.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndaq_m_fv4sf (__inactive, __a, __p);
}

/* Predicated VRINTM: round to integral, toward minus infinity.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndmq_m_fv4sf (__inactive, __a, __p);
}

/* Predicated VRINTN: round to integral, to nearest with ties to
   even.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndnq_m_fv4sf (__inactive, __a, __p);
}

/* Predicated VRINTP: round to integral, toward plus infinity.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndpq_m_fv4sf (__inactive, __a, __p);
}

/* Predicated VRINTZ: round to integral, toward zero.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndq_m_fv4sf (__inactive, __a, __p);
}

/* Predicated VRINTX: round to integral using the current rounding
   mode.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vrndxq_m_fv4sf (__inactive, __a, __p);
}
18299
/* Predicated VCMP ==: compare each lane of __a against broadcast
   scalar __b; result predicate bits are set only for active lanes
   where the comparison holds.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_n_fv4sf (__a, __b, __p);
}

/* Predicated VCMP >= of two f32 vectors.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_fv4sf (__a, __b, __p);
}

/* Predicated VCMP >= against broadcast scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgeq_m_n_fv4sf (__a, __b, __p);
}

/* Predicated VCMP > of two f32 vectors.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_fv4sf (__a, __b, __p);
}

/* Predicated VCMP > against broadcast scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpgtq_m_n_fv4sf (__a, __b, __p);
}

/* Predicated VCMP <= of two f32 vectors.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_fv4sf (__a, __b, __p);
}

/* Predicated VCMP <= against broadcast scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpleq_m_n_fv4sf (__a, __b, __p);
}

/* Predicated VCMP < of two f32 vectors.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_fv4sf (__a, __b, __p);
}

/* Predicated VCMP < against broadcast scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpltq_m_n_fv4sf (__a, __b, __p);
}

/* Predicated VCMP != of two f32 vectors.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_fv4sf (__a, __b, __p);
}

/* Predicated VCMP != against broadcast scalar __b.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpneq_m_n_fv4sf (__a, __b, __p);
}
18376
/* Predicated f32 -> u32 conversion, rounding toward minus infinity
   (VCVTM); predicate-clear lanes keep __inactive.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtmq_m_uv4si (__inactive, __a, __p);
}

/* Predicated f32 -> u32 conversion, rounding to nearest with ties to
   even (VCVTN).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtnq_m_uv4si (__inactive, __a, __p);
}

/* Predicated f32 -> u32 conversion, rounding toward plus infinity
   (VCVTP).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtpq_m_uv4si (__inactive, __a, __p);
}

/* Predicated f32 -> u32 conversion, rounding toward zero (VCVT).  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_from_f_uv4si (__inactive, __a, __p);
}

/* Predicated fixed-point u16 -> f16 conversion; __imm6 is the number
   of fractional bits (compile-time constant).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_f16_u16 (float16x8_t __inactive, uint16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_uv8hf (__inactive, __a, __imm6, __p);
}

/* Predicated fixed-point s16 -> f16 conversion with __imm6
   fractional bits.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_f16_s16 (float16x8_t __inactive, int16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_sv8hf (__inactive, __a, __imm6, __p);
}

/* Predicated fixed-point u32 -> f32 conversion with __imm6
   fractional bits.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_f32_u32 (float32x4_t __inactive, uint32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_uv4sf (__inactive, __a, __imm6, __p);
}

/* Predicated fixed-point s32 -> f32 conversion with __imm6
   fractional bits.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_f32_s32 (float32x4_t __inactive, int32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_to_f_sv4sf (__inactive, __a, __imm6, __p);
}
18432
/* Predicated VABD: per-lane absolute difference |__a - __b|;
   predicate-clear lanes keep __inactive.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_fv4sf (__inactive, __a, __b, __p);
}

/* Predicated VABD, f16 variant.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_fv8hf (__inactive, __a, __b, __p);
}

/* Predicated VADD of two f32 vectors.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_fv4sf (__inactive, __a, __b, __p);
}

/* Predicated VADD of two f16 vectors.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_fv8hf (__inactive, __a, __b, __p);
}

/* Predicated VADD with broadcast scalar __b, f32 variant.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_fv4sf (__inactive, __a, __b, __p);
}

/* Predicated VADD with broadcast scalar __b, f16 variant.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_fv8hf (__inactive, __a, __b, __p);
}
18474
/* Predicated VAND: bitwise AND of the two vectors' lane bit
   patterns; predicate-clear lanes keep __inactive.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_fv4sf (__inactive, __a, __b, __p);
}

/* Predicated VAND, f16 variant.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vandq_m_fv8hf (__inactive, __a, __b, __p);
}

/* Predicated VBIC: bitwise AND of __a with the complement of __b
   (bit clear).  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_fv4sf (__inactive, __a, __b, __p);
}

/* Predicated VBIC, f16 variant.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_fv8hf (__inactive, __a, __b, __p);
}

/* Predicated VBRSR (bit reverse and shift right) by scalar __b;
   predicate-clear lanes keep __inactive.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_fv4sf (__inactive, __a, __b, __p);
}

/* Predicated VBRSR, f16 variant.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vbrsrq_m_n_fv8hf (__inactive, __a, __b, __p);
}
18516
/* Predicated VCADD: complex add with the second operand rotated by
   270 degrees; predicate-clear lanes keep __inactive.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_fv4sf (__inactive, __a, __b, __p);
}

/* Predicated VCADD with 270-degree rotation, f16 variant.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_fv8hf (__inactive, __a, __b, __p);
}

/* Predicated VCADD with 90-degree rotation, f32 variant.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_fv4sf (__inactive, __a, __b, __p);
}

/* Predicated VCADD with 90-degree rotation, f16 variant.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_fv8hf (__inactive, __a, __b, __p);
}
18544
/* Predicated VCMLA: complex multiply-accumulate into __a, rotation
   0; only active lanes are updated.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_m_fv4sf (__a, __b, __c, __p);
}

/* Predicated VCMLA, rotation 0, f16 variant.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_m_fv8hf (__a, __b, __c, __p);
}

/* Predicated VCMLA with 180-degree rotation, f32 variant.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_rot180_m_fv4sf (__a, __b, __c, __p);
}

/* Predicated VCMLA with 180-degree rotation, f16 variant.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_rot180_m_fv8hf (__a, __b, __c, __p);
}

/* Predicated VCMLA with 270-degree rotation, f32 variant.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_rot270_m_fv4sf (__a, __b, __c, __p);
}

/* Predicated VCMLA with 270-degree rotation, f16 variant.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_rot270_m_fv8hf (__a, __b, __c, __p);
}

/* Predicated VCMLA with 90-degree rotation, f32 variant.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_rot90_m_fv4sf (__a, __b, __c, __p);
}

/* Predicated VCMLA with 90-degree rotation, f16 variant.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vcmlaq_rot90_m_fv8hf (__a, __b, __c, __p);
}
18600
18601 __extension__ extern __inline float32x4_t
18602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18603 __arm_vcmulq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
18604 {
18605 return __builtin_mve_vcmulq_m_fv4sf (__inactive, __a, __b, __p);
18606 }
18607
18608 __extension__ extern __inline float16x8_t
18609 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18610 __arm_vcmulq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
18611 {
18612 return __builtin_mve_vcmulq_m_fv8hf (__inactive, __a, __b, __p);
18613 }
18614
18615 __extension__ extern __inline float32x4_t
18616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18617 __arm_vcmulq_rot180_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
18618 {
18619 return __builtin_mve_vcmulq_rot180_m_fv4sf (__inactive, __a, __b, __p);
18620 }
18621
18622 __extension__ extern __inline float16x8_t
18623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18624 __arm_vcmulq_rot180_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
18625 {
18626 return __builtin_mve_vcmulq_rot180_m_fv8hf (__inactive, __a, __b, __p);
18627 }
18628
/* Merging-predicated (_m) complex multiply with 270-degree rotation, f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot270_m_fv4sf (__inactive, __a, __b, __p);
}
18635
/* Merging-predicated (_m) complex multiply with 270-degree rotation, f16.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot270_m_fv8hf (__inactive, __a, __b, __p);
}
18642
/* Merging-predicated (_m) complex multiply with 90-degree rotation, f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot90_m_fv4sf (__inactive, __a, __b, __p);
}
18649
/* Merging-predicated (_m) complex multiply with 90-degree rotation, f16.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot90_m_fv8hf (__inactive, __a, __b, __p);
}
18656
/* Merging-predicated (_m) conversion f32 -> s32 with immediate __imm6
   (fixed-point fraction bits — see the Arm MVE intrinsics spec).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_s32_f32 (int32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_sv4si (__inactive, __a, __imm6, __p);
}
18663
/* Merging-predicated (_m) conversion f16 -> s16 with immediate __imm6.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_s16_f16 (int16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_sv8hi (__inactive, __a, __imm6, __p);
}
18670
/* Merging-predicated (_m) conversion f32 -> u32 with immediate __imm6.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_u32_f32 (uint32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_uv4si (__inactive, __a, __imm6, __p);
}
18677
/* Merging-predicated (_m) conversion f16 -> u16 with immediate __imm6.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_n_u16_f16 (uint16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_n_from_f_uv8hi (__inactive, __a, __imm6, __p);
}
18684
/* Merging-predicated (_m) bitwise exclusive-OR on f32 vectors.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_fv4sf (__inactive, __a, __b, __p);
}
18691
/* Merging-predicated (_m) bitwise exclusive-OR on f16 vectors.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_veorq_m_fv8hf (__inactive, __a, __b, __p);
}
18698
/* Merging-predicated (_m) fused multiply-accumulate, f32 (accumulator __a).  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmaq_m_fv4sf (__a, __b, __c, __p);
}
18705
/* Merging-predicated (_m) fused multiply-accumulate, f16 (accumulator __a).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmaq_m_fv8hf (__a, __b, __c, __p);
}
18712
/* Merging-predicated (_m) fused multiply-accumulate with scalar __c, f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_m_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmaq_m_n_fv4sf (__a, __b, __c, __p);
}
18719
/* Merging-predicated (_m) fused multiply-accumulate with scalar __c, f16.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq_m_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmaq_m_n_fv8hf (__a, __b, __c, __p);
}
18726
/* Merging-predicated (_m) multiply with scalar addend __c (VFMAS form), f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_m_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmasq_m_n_fv4sf (__a, __b, __c, __p);
}
18733
/* Merging-predicated (_m) multiply with scalar addend __c (VFMAS form), f16.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq_m_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmasq_m_n_fv8hf (__a, __b, __c, __p);
}
18740
/* Merging-predicated (_m) fused multiply-subtract, f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmsq_m_fv4sf (__a, __b, __c, __p);
}
18747
/* Merging-predicated (_m) fused multiply-subtract, f16.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
{
  return __builtin_mve_vfmsq_m_fv8hf (__a, __b, __c, __p);
}
18754
/* Merging-predicated (_m) vmaxnm (IEEE maxNum-style maximum), f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmq_m_fv4sf (__inactive, __a, __b, __p);
}
18761
/* Merging-predicated (_m) vmaxnm (IEEE maxNum-style maximum), f16.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmq_m_fv8hf (__inactive, __a, __b, __p);
}
18768
/* Merging-predicated (_m) vminnm (IEEE minNum-style minimum), f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmq_m_fv4sf (__inactive, __a, __b, __p);
}
18775
/* Merging-predicated (_m) vminnm (IEEE minNum-style minimum), f16.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmq_m_fv8hf (__inactive, __a, __b, __p);
}
18782
/* Merging-predicated (_m) multiply, f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_fv4sf (__inactive, __a, __b, __p);
}
18789
/* Merging-predicated (_m) multiply, f16.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_fv8hf (__inactive, __a, __b, __p);
}
18796
/* Merging-predicated (_m) multiply by scalar __b, f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_fv4sf (__inactive, __a, __b, __p);
}
18803
/* Merging-predicated (_m) multiply by scalar __b, f16.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_fv8hf (__inactive, __a, __b, __p);
}
18810
/* Merging-predicated (_m) bitwise OR-NOT (ORN), f32 vectors.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_fv4sf (__inactive, __a, __b, __p);
}
18817
/* Merging-predicated (_m) bitwise OR-NOT (ORN), f16 vectors.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vornq_m_fv8hf (__inactive, __a, __b, __p);
}
18824
/* Merging-predicated (_m) bitwise OR, f32 vectors.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_fv4sf (__inactive, __a, __b, __p);
}
18831
/* Merging-predicated (_m) bitwise OR, f16 vectors.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vorrq_m_fv8hf (__inactive, __a, __b, __p);
}
18838
/* Merging-predicated (_m) subtract, f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_fv4sf (__inactive, __a, __b, __p);
}
18845
/* Merging-predicated (_m) subtract, f16.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_fv8hf (__inactive, __a, __b, __p);
}
18852
/* Merging-predicated (_m) subtract of scalar __b, f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_fv4sf (__inactive, __a, __b, __p);
}
18859
/* Merging-predicated (_m) subtract of scalar __b, f16.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_fv8hf (__inactive, __a, __b, __p);
}
18866
/* Contiguous load of four f32 elements; the base pointer is cast to the
   builtin's expected __builtin_neon_si pointer type.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_f32 (float32_t const * __base)
{
  return __builtin_mve_vld1q_fv4sf((__builtin_neon_si *) __base);
}
18873
/* Contiguous load of eight f16 elements.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_f16 (float16_t const * __base)
{
  return __builtin_mve_vld1q_fv8hf((__builtin_neon_hi *) __base);
}
18880
/* Contiguous word (VLDRW) load of four f32 elements.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_f32 (float32_t const * __base)
{
  return __builtin_mve_vldrwq_fv4sf((__builtin_neon_si *) __base);
}
18887
/* Zeroing-predicated (_z) contiguous f32 load under predicate __p.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_z_f32 (float32_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_z_fv4sf((__builtin_neon_si *) __base, __p);
}
18894
/* Zeroing-predicated (_z) contiguous f16 load under predicate __p.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_z_f16 (float16_t const * __base, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_z_fv8hf((__builtin_neon_hi *) __base, __p);
}
18901
/* Contiguous halfword (VLDRH) load of eight f16 elements.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_f16 (float16_t const * __base)
{
  return __builtin_mve_vldrhq_fv8hf((__builtin_neon_hi *) __base);
}
18908
/* Gather load of f16 elements from __base plus per-lane offsets in
   __offset (unscaled offsets — see the MVE intrinsics spec).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_f16 (float16_t const * __base, uint16x8_t __offset)
{
  return __builtin_mve_vldrhq_gather_offset_fv8hf((__builtin_neon_hi *) __base, __offset);
}
18915
/* Zeroing-predicated (_z) gather load of f16 at per-lane offsets.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_offset_z_f16 (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_offset_z_fv8hf((__builtin_neon_hi *) __base, __offset, __p);
}
18922
/* Gather load of f16 with per-lane offsets scaled by the element size
   (shifted-offset form).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_f16 (float16_t const * __base, uint16x8_t __offset)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_fv8hf ((__builtin_neon_hi *) __base, __offset);
}
18929
/* Zeroing-predicated (_z) shifted-offset gather load of f16.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrhq_gather_shifted_offset_z_f16 (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrhq_gather_shifted_offset_z_fv8hf ((__builtin_neon_hi *) __base, __offset, __p);
}
18936
/* Gather load of f32 from per-lane base addresses in __addr plus an
   immediate __offset.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_f32 (uint32x4_t __addr, const int __offset)
{
  return __builtin_mve_vldrwq_gather_base_fv4sf (__addr, __offset);
}
18943
/* Zeroing-predicated (_z) vector-base gather load of f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_z_f32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_base_z_fv4sf (__addr, __offset, __p);
}
18950
/* Gather load of f32 from __base plus per-lane offsets in __offset.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_f32 (float32_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrwq_gather_offset_fv4sf((__builtin_neon_si *) __base, __offset);
}
18957
/* Zeroing-predicated (_z) gather load of f32 at per-lane offsets.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_offset_z_f32 (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_offset_z_fv4sf((__builtin_neon_si *) __base, __offset, __p);
}
18964
/* Gather load of f32 with per-lane offsets scaled by the element size
   (shifted-offset form).  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_f32 (float32_t const * __base, uint32x4_t __offset)
{
  return __builtin_mve_vldrwq_gather_shifted_offset_fv4sf ((__builtin_neon_si *) __base, __offset);
}
18971
/* Zeroing-predicated (_z) shifted-offset gather load of f32.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_shifted_offset_z_f32 (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
{
  return __builtin_mve_vldrwq_gather_shifted_offset_z_fv4sf ((__builtin_neon_si *) __base, __offset, __p);
}
18978
/* Predicated (_p) contiguous store of four f32 elements.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_p_f32 (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_p_fv4sf ((__builtin_neon_si *) __addr, __value, __p);
}
18985
/* Contiguous word (VSTRW) store of four f32 elements.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_f32 (float32_t * __addr, float32x4_t __value)
{
  __builtin_mve_vstrwq_fv4sf ((__builtin_neon_si *) __addr, __value);
}
18992
/* Contiguous store of four f32 elements.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_f32 (float32_t * __addr, float32x4_t __value)
{
  __builtin_mve_vst1q_fv4sf ((__builtin_neon_si *) __addr, __value);
}
18999
/* Contiguous store of eight f16 elements.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_f16 (float16_t * __addr, float16x8_t __value)
{
  __builtin_mve_vst1q_fv8hf ((__builtin_neon_hi *) __addr, __value);
}
19006
/* Contiguous halfword (VSTRH) store of eight f16 elements.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_f16 (float16_t * __addr, float16x8_t __value)
{
  __builtin_mve_vstrhq_fv8hf ((__builtin_neon_hi *) __addr, __value);
}
19013
/* Predicated (_p) contiguous store of eight f16 elements.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_p_f16 (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_p_fv8hf ((__builtin_neon_hi *) __addr, __value, __p);
}
19020
/* Scatter store of f16 to __base plus per-lane offsets in __offset.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
{
  __builtin_mve_vstrhq_scatter_offset_fv8hf ((__builtin_neon_hi *) __base, __offset, __value);
}
19027
/* Predicated (_p) scatter store of f16 at per-lane offsets.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_offset_p_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_offset_p_fv8hf ((__builtin_neon_hi *) __base, __offset, __value, __p);
}
19034
/* Scatter store of f16 with per-lane offsets scaled by the element size
   (shifted-offset form).  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_fv8hf ((__builtin_neon_hi *) __base, __offset, __value);
}
19041
/* Predicated (_p) shifted-offset scatter store of f16.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrhq_scatter_shifted_offset_p_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrhq_scatter_shifted_offset_p_fv8hf ((__builtin_neon_hi *) __base, __offset, __value, __p);
}
19048
/* Scatter store of f32 to per-lane base addresses in __addr plus an
   immediate __offset.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_f32 (uint32x4_t __addr, const int __offset, float32x4_t __value)
{
  __builtin_mve_vstrwq_scatter_base_fv4sf (__addr, __offset, __value);
}
19055
/* Predicated (_p) vector-base scatter store of f32.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_p_f32 (uint32x4_t __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_base_p_fv4sf (__addr, __offset, __value, __p);
}
19062
/* Scatter store of f32 to __base plus per-lane offsets in __offset.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
{
  __builtin_mve_vstrwq_scatter_offset_fv4sf ((__builtin_neon_si *) __base, __offset, __value);
}
19069
/* Predicated (_p) scatter store of f32 at per-lane offsets.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_offset_p_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_offset_p_fv4sf ((__builtin_neon_si *) __base, __offset, __value, __p);
}
19076
/* Scatter store of f32 with per-lane offsets scaled by the element size
   (shifted-offset form).  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
{
  __builtin_mve_vstrwq_scatter_shifted_offset_fv4sf ((__builtin_neon_si *) __base, __offset, __value);
}
19083
/* Predicated (_p) shifted-offset scatter store of f32.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_shifted_offset_p_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
{
  __builtin_mve_vstrwq_scatter_shifted_offset_p_fv4sf ((__builtin_neon_si *) __base, __offset, __value, __p);
}
19090
/* Vector addition, f16; implemented with the GNU vector-extension '+'
   operator rather than a builtin.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_f16 (float16x8_t __a, float16x8_t __b)
{
  return __a + __b;
}
19097
/* Vector addition, f32; implemented with the GNU vector-extension '+'
   operator rather than a builtin.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __a + __b;
}
19104
/* Write-back gather load of f32: the "nowb" builtin produces the loaded
   data from the original base vector, then the "wb" builtin recomputes the
   updated base vector, which is stored back through *__addr.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_wb_f32 (uint32x4_t * __addr, const int __offset)
{
  float32x4_t
  result = __builtin_mve_vldrwq_gather_base_nowb_fv4sf (*__addr, __offset);
  *__addr = __builtin_mve_vldrwq_gather_base_wb_fv4sf (*__addr, __offset);
  return result;
}
19114
/* Zeroing-predicated (_z) write-back gather load of f32: loads the data
   under __p via the "nowb" builtin, then updates *__addr with the
   written-back base vector from the "wb" builtin.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vldrwq_gather_base_wb_z_f32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p)
{
  float32x4_t
  result = __builtin_mve_vldrwq_gather_base_nowb_z_fv4sf (*__addr, __offset, __p);
  *__addr = __builtin_mve_vldrwq_gather_base_wb_z_fv4sf (*__addr, __offset, __p);
  return result;
}
19124
/* Scatter store of f32 with base write-back; the builtin returns the
   updated base vector, which is stored back through *__addr.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_f32 (uint32x4_t * __addr, const int __offset, float32x4_t __value)
{
  *__addr = __builtin_mve_vstrwq_scatter_base_wb_fv4sf (*__addr, __offset, __value);
}
19131
/* Predicated (_p) scatter store of f32 with base write-back.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_p_f32 (uint32x4_t * __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
{
  *__addr = __builtin_mve_vstrwq_scatter_base_wb_p_fv4sf (*__addr, __offset, __value, __p);
}
19138
/* "Don't-care"-predicated (_x) scalar broadcast, f16: the inactive operand
   is an uninitialized vector, so inactive lanes are undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_f16 (float16_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
}
19145
/* "Don't-care"-predicated (_x) scalar broadcast, f32; inactive lanes
   undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_x_n_f32 (float32_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vdupq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
}
19152
/* _x-predicated vminnm, f16; inactive lanes undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}
19159
/* _x-predicated vminnm, f32; inactive lanes undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vminnmq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19166
/* _x-predicated vmaxnm, f16; inactive lanes undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}
19173
/* _x-predicated vmaxnm, f32; inactive lanes undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmaxnmq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19180
/* _x-predicated absolute difference (VABD), f16; inactive lanes undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}
19187
/* _x-predicated absolute difference (VABD), f32; inactive lanes undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vabdq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19194
/* _x-predicated absolute value, f16; inactive lanes undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
}
19201
/* _x-predicated absolute value, f32; inactive lanes undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vabsq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
}
19208
/* _x-predicated addition, f16; inactive lanes undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}
19215
/* _x-predicated addition, f32; inactive lanes undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19222
/* _x-predicated addition of scalar __b, f16; inactive lanes undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}
19229
/* _x-predicated addition of scalar __b, f32; inactive lanes undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vaddq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19236
/* _x-predicated negation, f16; inactive lanes undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x_f16 (float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
}
19243
/* _x-predicated negation, f32; inactive lanes undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x_f32 (float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vnegq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
}
19250
/* _x-predicated multiply, f16; inactive lanes undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}
19257
/* _x-predicated multiply, f32; inactive lanes undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19264
/* _x-predicated multiply by scalar __b, f16; inactive lanes undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}
19271
/* _x-predicated multiply by scalar __b, f32; inactive lanes undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vmulq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19278
/* _x-predicated subtract, f16; inactive lanes undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}
19285
/* _x-predicated subtract, f32; inactive lanes undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19292
/* _x-predicated subtract of scalar __b, f16; inactive lanes undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}
19299
/* _x-predicated subtract of scalar __b, f32; inactive lanes undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vsubq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19306
/* _x-predicated complex add with 90-degree rotation, f16; inactive lanes
   undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}
19313
/* _x-predicated complex add with 90-degree rotation, f32; inactive lanes
   undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot90_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19320
/* _x-predicated complex add with 270-degree rotation, f16; inactive lanes
   undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}
19327
/* _x-predicated complex add with 270-degree rotation, f32; inactive lanes
   undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcaddq_rot270_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19334
/* _x-predicated complex multiply, f16; inactive lanes undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}
19341
/* _x-predicated complex multiply, f32; inactive lanes undefined.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
}
19348
/* _x-predicated complex multiply with 90-degree rotation, f16; inactive
   lanes undefined.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmulq_rot90_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
}
19355
19356 __extension__ extern __inline float32x4_t
19357 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19358 __arm_vcmulq_rot90_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19359 {
19360 return __builtin_mve_vcmulq_rot90_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19361 }
19362
19363 __extension__ extern __inline float16x8_t
19364 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19365 __arm_vcmulq_rot180_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19366 {
19367 return __builtin_mve_vcmulq_rot180_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19368 }
19369
19370 __extension__ extern __inline float32x4_t
19371 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19372 __arm_vcmulq_rot180_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19373 {
19374 return __builtin_mve_vcmulq_rot180_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19375 }
19376
19377 __extension__ extern __inline float16x8_t
19378 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19379 __arm_vcmulq_rot270_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19380 {
19381 return __builtin_mve_vcmulq_rot270_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19382 }
19383
19384 __extension__ extern __inline float32x4_t
19385 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19386 __arm_vcmulq_rot270_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19387 {
19388 return __builtin_mve_vcmulq_rot270_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19389 }
19390
/* Predicated "_x" float-to-integer conversions with explicit rounding
   mode: vcvtaq (round to nearest, ties away), vcvtnq (round to nearest,
   ties even), vcvtpq (round towards +inf), vcvtmq (round towards -inf),
   each in signed and unsigned 16- and 32-bit forms.  As with all "_x"
   variants, the inactive lanes of the merging "_m" builtin are fed an
   uninitialized vector, so predicated-off lanes are undefined.  */
19391 __extension__ extern __inline int16x8_t
19392 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19393 __arm_vcvtaq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
19394 {
19395   return __builtin_mve_vcvtaq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
19396 }
19397
19398 __extension__ extern __inline int32x4_t
19399 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19400 __arm_vcvtaq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
19401 {
19402   return __builtin_mve_vcvtaq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
19403 }
19404
19405 __extension__ extern __inline uint16x8_t
19406 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19407 __arm_vcvtaq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
19408 {
19409   return __builtin_mve_vcvtaq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
19410 }
19411
19412 __extension__ extern __inline uint32x4_t
19413 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19414 __arm_vcvtaq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
19415 {
19416   return __builtin_mve_vcvtaq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
19417 }
19418
19419 __extension__ extern __inline int16x8_t
19420 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19421 __arm_vcvtnq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
19422 {
19423   return __builtin_mve_vcvtnq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
19424 }
19425
19426 __extension__ extern __inline int32x4_t
19427 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19428 __arm_vcvtnq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
19429 {
19430   return __builtin_mve_vcvtnq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
19431 }
19432
19433 __extension__ extern __inline uint16x8_t
19434 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19435 __arm_vcvtnq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
19436 {
19437   return __builtin_mve_vcvtnq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
19438 }
19439
19440 __extension__ extern __inline uint32x4_t
19441 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19442 __arm_vcvtnq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
19443 {
19444   return __builtin_mve_vcvtnq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
19445 }
19446
19447 __extension__ extern __inline int16x8_t
19448 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19449 __arm_vcvtpq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
19450 {
19451   return __builtin_mve_vcvtpq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
19452 }
19453
19454 __extension__ extern __inline int32x4_t
19455 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19456 __arm_vcvtpq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
19457 {
19458   return __builtin_mve_vcvtpq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
19459 }
19460
19461 __extension__ extern __inline uint16x8_t
19462 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19463 __arm_vcvtpq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
19464 {
19465   return __builtin_mve_vcvtpq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
19466 }
19467
19468 __extension__ extern __inline uint32x4_t
19469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19470 __arm_vcvtpq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
19471 {
19472   return __builtin_mve_vcvtpq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
19473 }
19474
19475 __extension__ extern __inline int16x8_t
19476 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19477 __arm_vcvtmq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
19478 {
19479   return __builtin_mve_vcvtmq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
19480 }
19481
19482 __extension__ extern __inline int32x4_t
19483 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19484 __arm_vcvtmq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
19485 {
19486   return __builtin_mve_vcvtmq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
19487 }
19488
19489 __extension__ extern __inline uint16x8_t
19490 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19491 __arm_vcvtmq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
19492 {
19493   return __builtin_mve_vcvtmq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
19494 }
19495
19496 __extension__ extern __inline uint32x4_t
19497 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19498 __arm_vcvtmq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
19499 {
19500   return __builtin_mve_vcvtmq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
19501 }
19502
/* Predicated "_x" conversion intrinsics: vcvtbq/vcvttq widen the
   bottom/top f16 halves of a vector to f32; vcvtq converts between
   integer and floating-point element types, with "_n" forms taking an
   additional compile-time immediate __imm6 (per ACLE, the number of
   fixed-point fraction bits — see the vcvtq_n documentation).  All use
   the uninitialized-inactive-operand idiom, so predicated-off lanes
   are undefined.  */
19503 __extension__ extern __inline float32x4_t
19504 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19505 __arm_vcvtbq_x_f32_f16 (float16x8_t __a, mve_pred16_t __p)
19506 {
19507   return __builtin_mve_vcvtbq_m_f32_f16v4sf (__arm_vuninitializedq_f32 (), __a, __p);
19508 }
19509
19510 __extension__ extern __inline float32x4_t
19511 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19512 __arm_vcvttq_x_f32_f16 (float16x8_t __a, mve_pred16_t __p)
19513 {
19514   return __builtin_mve_vcvttq_m_f32_f16v4sf (__arm_vuninitializedq_f32 (), __a, __p);
19515 }
19516
19517 __extension__ extern __inline float16x8_t
19518 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19519 __arm_vcvtq_x_f16_u16 (uint16x8_t __a, mve_pred16_t __p)
19520 {
19521   return __builtin_mve_vcvtq_m_to_f_uv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19522 }
19523
19524 __extension__ extern __inline float16x8_t
19525 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19526 __arm_vcvtq_x_f16_s16 (int16x8_t __a, mve_pred16_t __p)
19527 {
19528   return __builtin_mve_vcvtq_m_to_f_sv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19529 }
19530
19531 __extension__ extern __inline float32x4_t
19532 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19533 __arm_vcvtq_x_f32_s32 (int32x4_t __a, mve_pred16_t __p)
19534 {
19535   return __builtin_mve_vcvtq_m_to_f_sv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19536 }
19537
19538 __extension__ extern __inline float32x4_t
19539 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19540 __arm_vcvtq_x_f32_u32 (uint32x4_t __a, mve_pred16_t __p)
19541 {
19542   return __builtin_mve_vcvtq_m_to_f_uv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19543 }
19544
19545 __extension__ extern __inline float16x8_t
19546 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19547 __arm_vcvtq_x_n_f16_s16 (int16x8_t __a, const int __imm6, mve_pred16_t __p)
19548 {
19549   return __builtin_mve_vcvtq_m_n_to_f_sv8hf (__arm_vuninitializedq_f16 (), __a, __imm6, __p);
19550 }
19551
19552 __extension__ extern __inline float16x8_t
19553 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19554 __arm_vcvtq_x_n_f16_u16 (uint16x8_t __a, const int __imm6, mve_pred16_t __p)
19555 {
19556   return __builtin_mve_vcvtq_m_n_to_f_uv8hf (__arm_vuninitializedq_f16 (), __a, __imm6, __p);
19557 }
19558
19559 __extension__ extern __inline float32x4_t
19560 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19561 __arm_vcvtq_x_n_f32_s32 (int32x4_t __a, const int __imm6, mve_pred16_t __p)
19562 {
19563   return __builtin_mve_vcvtq_m_n_to_f_sv4sf (__arm_vuninitializedq_f32 (), __a, __imm6, __p);
19564 }
19565
19566 __extension__ extern __inline float32x4_t
19567 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19568 __arm_vcvtq_x_n_f32_u32 (uint32x4_t __a, const int __imm6, mve_pred16_t __p)
19569 {
19570   return __builtin_mve_vcvtq_m_n_to_f_uv4sf (__arm_vuninitializedq_f32 (), __a, __imm6, __p);
19571 }
19572
19573 __extension__ extern __inline int16x8_t
19574 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19575 __arm_vcvtq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
19576 {
19577   return __builtin_mve_vcvtq_m_from_f_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
19578 }
19579
19580 __extension__ extern __inline int32x4_t
19581 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19582 __arm_vcvtq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
19583 {
19584   return __builtin_mve_vcvtq_m_from_f_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
19585 }
19586
19587 __extension__ extern __inline uint16x8_t
19588 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19589 __arm_vcvtq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
19590 {
19591   return __builtin_mve_vcvtq_m_from_f_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
19592 }
19593
19594 __extension__ extern __inline uint32x4_t
19595 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19596 __arm_vcvtq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
19597 {
19598   return __builtin_mve_vcvtq_m_from_f_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
19599 }
19600
19601 __extension__ extern __inline int16x8_t
19602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19603 __arm_vcvtq_x_n_s16_f16 (float16x8_t __a, const int __imm6, mve_pred16_t __p)
19604 {
19605   return __builtin_mve_vcvtq_m_n_from_f_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm6, __p);
19606 }
19607
19608 __extension__ extern __inline int32x4_t
19609 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19610 __arm_vcvtq_x_n_s32_f32 (float32x4_t __a, const int __imm6, mve_pred16_t __p)
19611 {
19612   return __builtin_mve_vcvtq_m_n_from_f_sv4si (__arm_vuninitializedq_s32 (), __a, __imm6, __p);
19613 }
19614
19615 __extension__ extern __inline uint16x8_t
19616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19617 __arm_vcvtq_x_n_u16_f16 (float16x8_t __a, const int __imm6, mve_pred16_t __p)
19618 {
19619   return __builtin_mve_vcvtq_m_n_from_f_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm6, __p);
19620 }
19621
19622 __extension__ extern __inline uint32x4_t
19623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19624 __arm_vcvtq_x_n_u32_f32 (float32x4_t __a, const int __imm6, mve_pred16_t __p)
19625 {
19626   return __builtin_mve_vcvtq_m_n_from_f_uv4si (__arm_vuninitializedq_u32 (), __a, __imm6, __p);
19627 }
19628
/* Predicated "_x" float round-to-integral intrinsics: vrndq (towards
   zero), vrndnq (to nearest, ties even), vrndmq (towards -inf), vrndpq
   (towards +inf), vrndaq (to nearest, ties away), vrndxq (current
   rounding mode, raising inexact), for f16 and f32.  Predicated-off
   lanes are undefined (uninitialized inactive operand).  */
19629 __extension__ extern __inline float16x8_t
19630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19631 __arm_vrndq_x_f16 (float16x8_t __a, mve_pred16_t __p)
19632 {
19633   return __builtin_mve_vrndq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19634 }
19635
19636 __extension__ extern __inline float32x4_t
19637 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19638 __arm_vrndq_x_f32 (float32x4_t __a, mve_pred16_t __p)
19639 {
19640   return __builtin_mve_vrndq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19641 }
19642
19643 __extension__ extern __inline float16x8_t
19644 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19645 __arm_vrndnq_x_f16 (float16x8_t __a, mve_pred16_t __p)
19646 {
19647   return __builtin_mve_vrndnq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19648 }
19649
19650 __extension__ extern __inline float32x4_t
19651 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19652 __arm_vrndnq_x_f32 (float32x4_t __a, mve_pred16_t __p)
19653 {
19654   return __builtin_mve_vrndnq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19655 }
19656
19657 __extension__ extern __inline float16x8_t
19658 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19659 __arm_vrndmq_x_f16 (float16x8_t __a, mve_pred16_t __p)
19660 {
19661   return __builtin_mve_vrndmq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19662 }
19663
19664 __extension__ extern __inline float32x4_t
19665 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19666 __arm_vrndmq_x_f32 (float32x4_t __a, mve_pred16_t __p)
19667 {
19668   return __builtin_mve_vrndmq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19669 }
19670
19671 __extension__ extern __inline float16x8_t
19672 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19673 __arm_vrndpq_x_f16 (float16x8_t __a, mve_pred16_t __p)
19674 {
19675   return __builtin_mve_vrndpq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19676 }
19677
19678 __extension__ extern __inline float32x4_t
19679 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19680 __arm_vrndpq_x_f32 (float32x4_t __a, mve_pred16_t __p)
19681 {
19682   return __builtin_mve_vrndpq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19683 }
19684
19685 __extension__ extern __inline float16x8_t
19686 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19687 __arm_vrndaq_x_f16 (float16x8_t __a, mve_pred16_t __p)
19688 {
19689   return __builtin_mve_vrndaq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19690 }
19691
19692 __extension__ extern __inline float32x4_t
19693 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19694 __arm_vrndaq_x_f32 (float32x4_t __a, mve_pred16_t __p)
19695 {
19696   return __builtin_mve_vrndaq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19697 }
19698
19699 __extension__ extern __inline float16x8_t
19700 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19701 __arm_vrndxq_x_f16 (float16x8_t __a, mve_pred16_t __p)
19702 {
19703   return __builtin_mve_vrndxq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19704 }
19705
19706 __extension__ extern __inline float32x4_t
19707 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19708 __arm_vrndxq_x_f32 (float32x4_t __a, mve_pred16_t __p)
19709 {
19710   return __builtin_mve_vrndxq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19711 }
19712
/* Predicated "_x" bitwise operations on float vectors (operating on the
   raw bit patterns): vandq (AND), vbicq (AND NOT), veorq (XOR), vornq
   (OR NOT), vorrq (OR), plus vbrsrq_x_n (bit-reverse shift right by the
   scalar __b).  Predicated-off lanes are undefined (uninitialized
   inactive operand).  */
19713 __extension__ extern __inline float16x8_t
19714 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19715 __arm_vandq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19716 {
19717   return __builtin_mve_vandq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19718 }
19719
19720 __extension__ extern __inline float32x4_t
19721 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19722 __arm_vandq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19723 {
19724   return __builtin_mve_vandq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19725 }
19726
19727 __extension__ extern __inline float16x8_t
19728 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19729 __arm_vbicq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19730 {
19731   return __builtin_mve_vbicq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19732 }
19733
19734 __extension__ extern __inline float32x4_t
19735 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19736 __arm_vbicq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19737 {
19738   return __builtin_mve_vbicq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19739 }
19740
19741 __extension__ extern __inline float16x8_t
19742 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19743 __arm_vbrsrq_x_n_f16 (float16x8_t __a, int32_t __b, mve_pred16_t __p)
19744 {
19745   return __builtin_mve_vbrsrq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19746 }
19747
19748 __extension__ extern __inline float32x4_t
19749 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19750 __arm_vbrsrq_x_n_f32 (float32x4_t __a, int32_t __b, mve_pred16_t __p)
19751 {
19752   return __builtin_mve_vbrsrq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19753 }
19754
19755 __extension__ extern __inline float16x8_t
19756 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19757 __arm_veorq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19758 {
19759   return __builtin_mve_veorq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19760 }
19761
19762 __extension__ extern __inline float32x4_t
19763 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19764 __arm_veorq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19765 {
19766   return __builtin_mve_veorq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19767 }
19768
19769 __extension__ extern __inline float16x8_t
19770 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19771 __arm_vornq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19772 {
19773   return __builtin_mve_vornq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19774 }
19775
19776 __extension__ extern __inline float32x4_t
19777 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19778 __arm_vornq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19779 {
19780   return __builtin_mve_vornq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19781 }
19782
19783 __extension__ extern __inline float16x8_t
19784 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19785 __arm_vorrq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
19786 {
19787   return __builtin_mve_vorrq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
19788 }
19789
19790 __extension__ extern __inline float32x4_t
19791 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19792 __arm_vorrq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
19793 {
19794   return __builtin_mve_vorrq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
19795 }
19796
/* Predicated "_x" element-reversal intrinsics for float vectors:
   vrev32q reverses f16 elements within each 32-bit container, vrev64q
   within each 64-bit container.  Predicated-off lanes are undefined.  */
19797 __extension__ extern __inline float16x8_t
19798 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19799 __arm_vrev32q_x_f16 (float16x8_t __a, mve_pred16_t __p)
19800 {
19801   return __builtin_mve_vrev32q_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19802 }
19803
19804 __extension__ extern __inline float16x8_t
19805 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19806 __arm_vrev64q_x_f16 (float16x8_t __a, mve_pred16_t __p)
19807 {
19808   return __builtin_mve_vrev64q_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
19809 }
19810
19811 __extension__ extern __inline float32x4_t
19812 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19813 __arm_vrev64q_x_f32 (float32x4_t __a, mve_pred16_t __p)
19814 {
19815   return __builtin_mve_vrev64q_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
19816 }
19817
/* Float load/store intrinsics.  vld4q/vld2q (and their vst2q
   counterparts) use a union to convert between the user-visible tuple
   types (float16x8x2/4_t etc.) and the opaque register-list types
   (__builtin_neon_oi/xi) that the builtins operate on.  vld1q_z and
   vst1q_p are predicated single-vector forms that forward to the
   corresponding vldrhq_z/vldrwq_z and vstrhq_p/vstrwq_p intrinsics.  */
19818 __extension__ extern __inline float16x8x4_t
19819 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19820 __arm_vld4q_f16 (float16_t const * __addr)
19821 {
19822   union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv;
19823   __rv.__o = __builtin_mve_vld4qv8hf (__addr);
19824   return __rv.__i;
19825 }
19826
19827 __extension__ extern __inline float16x8x2_t
19828 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19829 __arm_vld2q_f16 (float16_t const * __addr)
19830 {
19831   union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv;
19832   __rv.__o = __builtin_mve_vld2qv8hf (__addr);
19833   return __rv.__i;
19834 }
19835
19836 __extension__ extern __inline float16x8_t
19837 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19838 __arm_vld1q_z_f16 (float16_t const *__base, mve_pred16_t __p)
19839 {
19840   return vldrhq_z_f16 (__base, __p);
19841 }
19842
19843 __extension__ extern __inline void
19844 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19845 __arm_vst2q_f16 (float16_t * __addr, float16x8x2_t __value)
19846 {
19847   union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv;
19848   __rv.__i = __value;
19849   __builtin_mve_vst2qv8hf (__addr, __rv.__o);
19850 }
19851
19852 __extension__ extern __inline void
19853 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19854 __arm_vst1q_p_f16 (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
19855 {
19856   return vstrhq_p_f16 (__addr, __value, __p);
19857 }
19858
19859 __extension__ extern __inline float32x4x4_t
19860 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19861 __arm_vld4q_f32 (float32_t const * __addr)
19862 {
19863   union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
19864   __rv.__o = __builtin_mve_vld4qv4sf (__addr);
19865   return __rv.__i;
19866 }
19867
19868 __extension__ extern __inline float32x4x2_t
19869 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19870 __arm_vld2q_f32 (float32_t const * __addr)
19871 {
19872   union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
19873   __rv.__o = __builtin_mve_vld2qv4sf (__addr);
19874   return __rv.__i;
19875 }
19876
19877 __extension__ extern __inline float32x4_t
19878 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19879 __arm_vld1q_z_f32 (float32_t const *__base, mve_pred16_t __p)
19880 {
19881   return vldrwq_z_f32 (__base, __p);
19882 }
19883
19884 __extension__ extern __inline void
19885 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19886 __arm_vst2q_f32 (float32_t * __addr, float32x4x2_t __value)
19887 {
19888   union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
19889   __rv.__i = __value;
19890   __builtin_mve_vst2qv4sf (__addr, __rv.__o);
19891 }
19892
19893 __extension__ extern __inline void
19894 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19895 __arm_vst1q_p_f32 (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
19896 {
19897   return vstrwq_p_f32 (__addr, __value, __p);
19898 }
19899
/* Scalar lane insert/extract for float vectors.  __ARM_CHECK_LANEQ
   diagnoses an out-of-range lane index at compile time, and
   __ARM_LANEQ maps the user-visible lane number onto the vector's
   storage order (both macros are defined earlier in this header).  */
19900 __extension__ extern __inline float16x8_t
19901 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19902 __arm_vsetq_lane_f16 (float16_t __a, float16x8_t __b, const int __idx)
19903 {
19904   __ARM_CHECK_LANEQ (__b, __idx);
19905   __b[__ARM_LANEQ(__b,__idx)] = __a;
19906   return __b;
19907 }
19908
19909 __extension__ extern __inline float32x4_t
19910 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19911 __arm_vsetq_lane_f32 (float32_t __a, float32x4_t __b, const int __idx)
19912 {
19913   __ARM_CHECK_LANEQ (__b, __idx);
19914   __b[__ARM_LANEQ(__b,__idx)] = __a;
19915   return __b;
19916 }
19917
19918 __extension__ extern __inline float16_t
19919 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19920 __arm_vgetq_lane_f16 (float16x8_t __a, const int __idx)
19921 {
19922   __ARM_CHECK_LANEQ (__a, __idx);
19923   return __a[__ARM_LANEQ(__a,__idx)];
19924 }
19925
19926 __extension__ extern __inline float32_t
19927 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19928 __arm_vgetq_lane_f32 (float32x4_t __a, const int __idx)
19929 {
19930   __ARM_CHECK_LANEQ (__a, __idx);
19931   return __a[__ARM_LANEQ(__a,__idx)];
19932 }
19933 #endif
19934
19935 #ifdef __cplusplus
/* C++-only polymorphic overloads of __arm_vst4q: each overload simply
   dispatches to the type-suffixed intrinsic matching its argument
   types, giving the type-generic "vst4q" spelling under C++ overload
   resolution.  */
19936 __extension__ extern __inline void
19937 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19938 __arm_vst4q (int8_t * __addr, int8x16x4_t __value)
19939 {
19940  __arm_vst4q_s8 (__addr, __value);
19941 }
19942
19943 __extension__ extern __inline void
19944 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19945 __arm_vst4q (int16_t * __addr, int16x8x4_t __value)
19946 {
19947  __arm_vst4q_s16 (__addr, __value);
19948 }
19949
19950 __extension__ extern __inline void
19951 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19952 __arm_vst4q (int32_t * __addr, int32x4x4_t __value)
19953 {
19954  __arm_vst4q_s32 (__addr, __value);
19955 }
19956
19957 __extension__ extern __inline void
19958 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19959 __arm_vst4q (uint8_t * __addr, uint8x16x4_t __value)
19960 {
19961  __arm_vst4q_u8 (__addr, __value);
19962 }
19963
19964 __extension__ extern __inline void
19965 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19966 __arm_vst4q (uint16_t * __addr, uint16x8x4_t __value)
19967 {
19968  __arm_vst4q_u16 (__addr, __value);
19969 }
19970
19971 __extension__ extern __inline void
19972 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19973 __arm_vst4q (uint32_t * __addr, uint32x4x4_t __value)
19974 {
19975  __arm_vst4q_u32 (__addr, __value);
19976 }
19977
/* C++ polymorphic overloads of __arm_vdupq_n (duplicate a scalar into
   every lane), dispatching on the scalar argument's type.  */
19978 __extension__ extern __inline int8x16_t
19979 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19980 __arm_vdupq_n (int8_t __a)
19981 {
19982  return __arm_vdupq_n_s8 (__a);
19983 }
19984
19985 __extension__ extern __inline int16x8_t
19986 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19987 __arm_vdupq_n (int16_t __a)
19988 {
19989  return __arm_vdupq_n_s16 (__a);
19990 }
19991
19992 __extension__ extern __inline int32x4_t
19993 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19994 __arm_vdupq_n (int32_t __a)
19995 {
19996  return __arm_vdupq_n_s32 (__a);
19997 }
19998
/* C++ polymorphic overloads of __arm_vabsq (per-lane absolute value),
   dispatching on the signed vector argument type.  */
19999 __extension__ extern __inline int8x16_t
20000 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20001 __arm_vabsq (int8x16_t __a)
20002 {
20003  return __arm_vabsq_s8 (__a);
20004 }
20005
20006 __extension__ extern __inline int16x8_t
20007 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20008 __arm_vabsq (int16x8_t __a)
20009 {
20010  return __arm_vabsq_s16 (__a);
20011 }
20012
20013 __extension__ extern __inline int32x4_t
20014 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20015 __arm_vabsq (int32x4_t __a)
20016 {
20017  return __arm_vabsq_s32 (__a);
20018 }
20019
/* C++ polymorphic overloads of __arm_vclsq (count leading sign bits),
   dispatching on the signed vector argument type.  */
20020 __extension__ extern __inline int8x16_t
20021 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20022 __arm_vclsq (int8x16_t __a)
20023 {
20024  return __arm_vclsq_s8 (__a);
20025 }
20026
20027 __extension__ extern __inline int16x8_t
20028 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20029 __arm_vclsq (int16x8_t __a)
20030 {
20031  return __arm_vclsq_s16 (__a);
20032 }
20033
20034 __extension__ extern __inline int32x4_t
20035 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20036 __arm_vclsq (int32x4_t __a)
20037 {
20038  return __arm_vclsq_s32 (__a);
20039 }
20040
/* C++ polymorphic overloads of __arm_vclzq (count leading zeros),
   dispatching on the signed vector argument type.  */
20041 __extension__ extern __inline int8x16_t
20042 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20043 __arm_vclzq (int8x16_t __a)
20044 {
20045  return __arm_vclzq_s8 (__a);
20046 }
20047
20048 __extension__ extern __inline int16x8_t
20049 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20050 __arm_vclzq (int16x8_t __a)
20051 {
20052  return __arm_vclzq_s16 (__a);
20053 }
20054
20055 __extension__ extern __inline int32x4_t
20056 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20057 __arm_vclzq (int32x4_t __a)
20058 {
20059  return __arm_vclzq_s32 (__a);
20060 }
20061
/* C++ polymorphic overloads of __arm_vnegq (per-lane negation),
   dispatching on the signed vector argument type.  */
20062 __extension__ extern __inline int8x16_t
20063 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20064 __arm_vnegq (int8x16_t __a)
20065 {
20066  return __arm_vnegq_s8 (__a);
20067 }
20068
20069 __extension__ extern __inline int16x8_t
20070 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20071 __arm_vnegq (int16x8_t __a)
20072 {
20073  return __arm_vnegq_s16 (__a);
20074 }
20075
20076 __extension__ extern __inline int32x4_t
20077 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20078 __arm_vnegq (int32x4_t __a)
20079 {
20080  return __arm_vnegq_s32 (__a);
20081 }
20082
/* C++ polymorphic overloads of the horizontal-add reductions:
   __arm_vaddlvq reduces int32x4_t to a 64-bit scalar sum;
   __arm_vaddvq reduces any signed vector to a 32-bit scalar sum.  */
20083 __extension__ extern __inline int64_t
20084 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20085 __arm_vaddlvq (int32x4_t __a)
20086 {
20087  return __arm_vaddlvq_s32 (__a);
20088 }
20089
20090 __extension__ extern __inline int32_t
20091 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20092 __arm_vaddvq (int8x16_t __a)
20093 {
20094  return __arm_vaddvq_s8 (__a);
20095 }
20096
20097 __extension__ extern __inline int32_t
20098 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20099 __arm_vaddvq (int16x8_t __a)
20100 {
20101  return __arm_vaddvq_s16 (__a);
20102 }
20103
20104 __extension__ extern __inline int32_t
20105 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20106 __arm_vaddvq (int32x4_t __a)
20107 {
20108  return __arm_vaddvq_s32 (__a);
20109 }
20110
/* C++ polymorphic overloads of the widening moves: __arm_vmovlbq
   widens the bottom (even-indexed) half-width elements, __arm_vmovltq
   the top (odd-indexed) ones, to the next wider element type.  */
20111 __extension__ extern __inline int16x8_t
20112 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20113 __arm_vmovlbq (int8x16_t __a)
20114 {
20115  return __arm_vmovlbq_s8 (__a);
20116 }
20117
20118 __extension__ extern __inline int32x4_t
20119 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20120 __arm_vmovlbq (int16x8_t __a)
20121 {
20122  return __arm_vmovlbq_s16 (__a);
20123 }
20124
20125 __extension__ extern __inline int16x8_t
20126 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20127 __arm_vmovltq (int8x16_t __a)
20128 {
20129  return __arm_vmovltq_s8 (__a);
20130 }
20131
20132 __extension__ extern __inline int32x4_t
20133 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20134 __arm_vmovltq (int16x8_t __a)
20135 {
20136  return __arm_vmovltq_s16 (__a);
20137 }
20138
/* C++ polymorphic overloads of __arm_vmvnq (bitwise NOT), dispatching
   on the signed vector argument type.  */
20139 __extension__ extern __inline int8x16_t
20140 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20141 __arm_vmvnq (int8x16_t __a)
20142 {
20143  return __arm_vmvnq_s8 (__a);
20144 }
20145
20146 __extension__ extern __inline int16x8_t
20147 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20148 __arm_vmvnq (int16x8_t __a)
20149 {
20150  return __arm_vmvnq_s16 (__a);
20151 }
20152
20153 __extension__ extern __inline int32x4_t
20154 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20155 __arm_vmvnq (int32x4_t __a)
20156 {
20157  return __arm_vmvnq_s32 (__a);
20158 }
20159
/* C++ polymorphic overloads of the element-reversal intrinsics:
   vrev16q reverses elements within each 16-bit container, vrev32q
   within 32-bit containers, vrev64q within 64-bit containers.  Only
   the element widths that fit each container size are overloaded.  */
20160 __extension__ extern __inline int8x16_t
20161 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20162 __arm_vrev16q (int8x16_t __a)
20163 {
20164  return __arm_vrev16q_s8 (__a);
20165 }
20166
20167 __extension__ extern __inline int8x16_t
20168 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20169 __arm_vrev32q (int8x16_t __a)
20170 {
20171  return __arm_vrev32q_s8 (__a);
20172 }
20173
20174 __extension__ extern __inline int16x8_t
20175 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20176 __arm_vrev32q (int16x8_t __a)
20177 {
20178  return __arm_vrev32q_s16 (__a);
20179 }
20180
20181 __extension__ extern __inline int8x16_t
20182 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20183 __arm_vrev64q (int8x16_t __a)
20184 {
20185  return __arm_vrev64q_s8 (__a);
20186 }
20187
20188 __extension__ extern __inline int16x8_t
20189 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20190 __arm_vrev64q (int16x8_t __a)
20191 {
20192  return __arm_vrev64q_s16 (__a);
20193 }
20194
20195 __extension__ extern __inline int32x4_t
20196 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20197 __arm_vrev64q (int32x4_t __a)
20198 {
20199  return __arm_vrev64q_s32 (__a);
20200 }
20201
20202 __extension__ extern __inline int8x16_t
20203 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20204 __arm_vqabsq (int8x16_t __a)
20205 {
20206 return __arm_vqabsq_s8 (__a);
20207 }
20208
20209 __extension__ extern __inline int16x8_t
20210 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20211 __arm_vqabsq (int16x8_t __a)
20212 {
20213 return __arm_vqabsq_s16 (__a);
20214 }
20215
20216 __extension__ extern __inline int32x4_t
20217 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20218 __arm_vqabsq (int32x4_t __a)
20219 {
20220 return __arm_vqabsq_s32 (__a);
20221 }
20222
20223 __extension__ extern __inline int8x16_t
20224 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20225 __arm_vqnegq (int8x16_t __a)
20226 {
20227 return __arm_vqnegq_s8 (__a);
20228 }
20229
20230 __extension__ extern __inline int16x8_t
20231 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20232 __arm_vqnegq (int16x8_t __a)
20233 {
20234 return __arm_vqnegq_s16 (__a);
20235 }
20236
20237 __extension__ extern __inline int32x4_t
20238 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20239 __arm_vqnegq (int32x4_t __a)
20240 {
20241 return __arm_vqnegq_s32 (__a);
20242 }
20243
20244 __extension__ extern __inline uint8x16_t
20245 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20246 __arm_vrev64q (uint8x16_t __a)
20247 {
20248 return __arm_vrev64q_u8 (__a);
20249 }
20250
20251 __extension__ extern __inline uint16x8_t
20252 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20253 __arm_vrev64q (uint16x8_t __a)
20254 {
20255 return __arm_vrev64q_u16 (__a);
20256 }
20257
20258 __extension__ extern __inline uint32x4_t
20259 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20260 __arm_vrev64q (uint32x4_t __a)
20261 {
20262 return __arm_vrev64q_u32 (__a);
20263 }
20264
20265 __extension__ extern __inline uint8x16_t
20266 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20267 __arm_vmvnq (uint8x16_t __a)
20268 {
20269 return __arm_vmvnq_u8 (__a);
20270 }
20271
20272 __extension__ extern __inline uint16x8_t
20273 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20274 __arm_vmvnq (uint16x8_t __a)
20275 {
20276 return __arm_vmvnq_u16 (__a);
20277 }
20278
20279 __extension__ extern __inline uint32x4_t
20280 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20281 __arm_vmvnq (uint32x4_t __a)
20282 {
20283 return __arm_vmvnq_u32 (__a);
20284 }
20285
20286 __extension__ extern __inline uint8x16_t
20287 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20288 __arm_vdupq_n (uint8_t __a)
20289 {
20290 return __arm_vdupq_n_u8 (__a);
20291 }
20292
20293 __extension__ extern __inline uint16x8_t
20294 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20295 __arm_vdupq_n (uint16_t __a)
20296 {
20297 return __arm_vdupq_n_u16 (__a);
20298 }
20299
20300 __extension__ extern __inline uint32x4_t
20301 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20302 __arm_vdupq_n (uint32_t __a)
20303 {
20304 return __arm_vdupq_n_u32 (__a);
20305 }
20306
20307 __extension__ extern __inline uint8x16_t
20308 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20309 __arm_vclzq (uint8x16_t __a)
20310 {
20311 return __arm_vclzq_u8 (__a);
20312 }
20313
20314 __extension__ extern __inline uint16x8_t
20315 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20316 __arm_vclzq (uint16x8_t __a)
20317 {
20318 return __arm_vclzq_u16 (__a);
20319 }
20320
20321 __extension__ extern __inline uint32x4_t
20322 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20323 __arm_vclzq (uint32x4_t __a)
20324 {
20325 return __arm_vclzq_u32 (__a);
20326 }
20327
20328 __extension__ extern __inline uint32_t
20329 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20330 __arm_vaddvq (uint8x16_t __a)
20331 {
20332 return __arm_vaddvq_u8 (__a);
20333 }
20334
20335 __extension__ extern __inline uint32_t
20336 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20337 __arm_vaddvq (uint16x8_t __a)
20338 {
20339 return __arm_vaddvq_u16 (__a);
20340 }
20341
20342 __extension__ extern __inline uint32_t
20343 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20344 __arm_vaddvq (uint32x4_t __a)
20345 {
20346 return __arm_vaddvq_u32 (__a);
20347 }
20348
20349 __extension__ extern __inline uint8x16_t
20350 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20351 __arm_vrev32q (uint8x16_t __a)
20352 {
20353 return __arm_vrev32q_u8 (__a);
20354 }
20355
20356 __extension__ extern __inline uint16x8_t
20357 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20358 __arm_vrev32q (uint16x8_t __a)
20359 {
20360 return __arm_vrev32q_u16 (__a);
20361 }
20362
20363 __extension__ extern __inline uint16x8_t
20364 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20365 __arm_vmovltq (uint8x16_t __a)
20366 {
20367 return __arm_vmovltq_u8 (__a);
20368 }
20369
20370 __extension__ extern __inline uint32x4_t
20371 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20372 __arm_vmovltq (uint16x8_t __a)
20373 {
20374 return __arm_vmovltq_u16 (__a);
20375 }
20376
20377 __extension__ extern __inline uint16x8_t
20378 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20379 __arm_vmovlbq (uint8x16_t __a)
20380 {
20381 return __arm_vmovlbq_u8 (__a);
20382 }
20383
20384 __extension__ extern __inline uint32x4_t
20385 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20386 __arm_vmovlbq (uint16x8_t __a)
20387 {
20388 return __arm_vmovlbq_u16 (__a);
20389 }
20390
20391 __extension__ extern __inline uint8x16_t
20392 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20393 __arm_vrev16q (uint8x16_t __a)
20394 {
20395 return __arm_vrev16q_u8 (__a);
20396 }
20397
20398 __extension__ extern __inline uint64_t
20399 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20400 __arm_vaddlvq (uint32x4_t __a)
20401 {
20402 return __arm_vaddlvq_u32 (__a);
20403 }
20404
20405 __extension__ extern __inline int8x16_t
20406 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20407 __arm_vshrq (int8x16_t __a, const int __imm)
20408 {
20409 return __arm_vshrq_n_s8 (__a, __imm);
20410 }
20411
20412 __extension__ extern __inline int16x8_t
20413 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20414 __arm_vshrq (int16x8_t __a, const int __imm)
20415 {
20416 return __arm_vshrq_n_s16 (__a, __imm);
20417 }
20418
20419 __extension__ extern __inline int32x4_t
20420 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20421 __arm_vshrq (int32x4_t __a, const int __imm)
20422 {
20423 return __arm_vshrq_n_s32 (__a, __imm);
20424 }
20425
20426 __extension__ extern __inline uint8x16_t
20427 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20428 __arm_vshrq (uint8x16_t __a, const int __imm)
20429 {
20430 return __arm_vshrq_n_u8 (__a, __imm);
20431 }
20432
20433 __extension__ extern __inline uint16x8_t
20434 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20435 __arm_vshrq (uint16x8_t __a, const int __imm)
20436 {
20437 return __arm_vshrq_n_u16 (__a, __imm);
20438 }
20439
20440 __extension__ extern __inline uint32x4_t
20441 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20442 __arm_vshrq (uint32x4_t __a, const int __imm)
20443 {
20444 return __arm_vshrq_n_u32 (__a, __imm);
20445 }
20446
20447 __extension__ extern __inline int64_t
20448 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20449 __arm_vaddlvq_p (int32x4_t __a, mve_pred16_t __p)
20450 {
20451 return __arm_vaddlvq_p_s32 (__a, __p);
20452 }
20453
20454 __extension__ extern __inline uint64_t
20455 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20456 __arm_vaddlvq_p (uint32x4_t __a, mve_pred16_t __p)
20457 {
20458 return __arm_vaddlvq_p_u32 (__a, __p);
20459 }
20460
20461 __extension__ extern __inline int32_t
20462 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20463 __arm_vcmpneq (int8x16_t __a, int8x16_t __b)
20464 {
20465 return __arm_vcmpneq_s8 (__a, __b);
20466 }
20467
20468 __extension__ extern __inline mve_pred16_t
20469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20470 __arm_vcmpneq (int16x8_t __a, int16x8_t __b)
20471 {
20472 return __arm_vcmpneq_s16 (__a, __b);
20473 }
20474
20475 __extension__ extern __inline mve_pred16_t
20476 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20477 __arm_vcmpneq (int32x4_t __a, int32x4_t __b)
20478 {
20479 return __arm_vcmpneq_s32 (__a, __b);
20480 }
20481
20482 __extension__ extern __inline mve_pred16_t
20483 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20484 __arm_vcmpneq (uint8x16_t __a, uint8x16_t __b)
20485 {
20486 return __arm_vcmpneq_u8 (__a, __b);
20487 }
20488
20489 __extension__ extern __inline mve_pred16_t
20490 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20491 __arm_vcmpneq (uint16x8_t __a, uint16x8_t __b)
20492 {
20493 return __arm_vcmpneq_u16 (__a, __b);
20494 }
20495
20496 __extension__ extern __inline mve_pred16_t
20497 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20498 __arm_vcmpneq (uint32x4_t __a, uint32x4_t __b)
20499 {
20500 return __arm_vcmpneq_u32 (__a, __b);
20501 }
20502
20503 __extension__ extern __inline int8x16_t
20504 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20505 __arm_vshlq (int8x16_t __a, int8x16_t __b)
20506 {
20507 return __arm_vshlq_s8 (__a, __b);
20508 }
20509
20510 __extension__ extern __inline int16x8_t
20511 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20512 __arm_vshlq (int16x8_t __a, int16x8_t __b)
20513 {
20514 return __arm_vshlq_s16 (__a, __b);
20515 }
20516
20517 __extension__ extern __inline int32x4_t
20518 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20519 __arm_vshlq (int32x4_t __a, int32x4_t __b)
20520 {
20521 return __arm_vshlq_s32 (__a, __b);
20522 }
20523
20524 __extension__ extern __inline uint8x16_t
20525 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20526 __arm_vshlq (uint8x16_t __a, int8x16_t __b)
20527 {
20528 return __arm_vshlq_u8 (__a, __b);
20529 }
20530
20531 __extension__ extern __inline uint16x8_t
20532 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20533 __arm_vshlq (uint16x8_t __a, int16x8_t __b)
20534 {
20535 return __arm_vshlq_u16 (__a, __b);
20536 }
20537
20538 __extension__ extern __inline uint32x4_t
20539 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20540 __arm_vshlq (uint32x4_t __a, int32x4_t __b)
20541 {
20542 return __arm_vshlq_u32 (__a, __b);
20543 }
20544
20545 __extension__ extern __inline uint8x16_t
20546 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20547 __arm_vsubq (uint8x16_t __a, uint8x16_t __b)
20548 {
20549 return __arm_vsubq_u8 (__a, __b);
20550 }
20551
20552 __extension__ extern __inline uint8x16_t
20553 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20554 __arm_vsubq (uint8x16_t __a, uint8_t __b)
20555 {
20556 return __arm_vsubq_n_u8 (__a, __b);
20557 }
20558
20559 __extension__ extern __inline uint8x16_t
20560 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20561 __arm_vrmulhq (uint8x16_t __a, uint8x16_t __b)
20562 {
20563 return __arm_vrmulhq_u8 (__a, __b);
20564 }
20565
20566 __extension__ extern __inline uint8x16_t
20567 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20568 __arm_vrhaddq (uint8x16_t __a, uint8x16_t __b)
20569 {
20570 return __arm_vrhaddq_u8 (__a, __b);
20571 }
20572
20573 __extension__ extern __inline uint8x16_t
20574 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20575 __arm_vqsubq (uint8x16_t __a, uint8x16_t __b)
20576 {
20577 return __arm_vqsubq_u8 (__a, __b);
20578 }
20579
20580 __extension__ extern __inline uint8x16_t
20581 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20582 __arm_vqsubq (uint8x16_t __a, uint8_t __b)
20583 {
20584 return __arm_vqsubq_n_u8 (__a, __b);
20585 }
20586
20587 __extension__ extern __inline uint8x16_t
20588 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20589 __arm_vqaddq (uint8x16_t __a, uint8x16_t __b)
20590 {
20591 return __arm_vqaddq_u8 (__a, __b);
20592 }
20593
20594 __extension__ extern __inline uint8x16_t
20595 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20596 __arm_vqaddq (uint8x16_t __a, uint8_t __b)
20597 {
20598 return __arm_vqaddq_n_u8 (__a, __b);
20599 }
20600
20601 __extension__ extern __inline uint8x16_t
20602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20603 __arm_vorrq (uint8x16_t __a, uint8x16_t __b)
20604 {
20605 return __arm_vorrq_u8 (__a, __b);
20606 }
20607
20608 __extension__ extern __inline uint8x16_t
20609 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20610 __arm_vornq (uint8x16_t __a, uint8x16_t __b)
20611 {
20612 return __arm_vornq_u8 (__a, __b);
20613 }
20614
20615 __extension__ extern __inline uint8x16_t
20616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20617 __arm_vmulq (uint8x16_t __a, uint8x16_t __b)
20618 {
20619 return __arm_vmulq_u8 (__a, __b);
20620 }
20621
20622 __extension__ extern __inline uint8x16_t
20623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20624 __arm_vmulq (uint8x16_t __a, uint8_t __b)
20625 {
20626 return __arm_vmulq_n_u8 (__a, __b);
20627 }
20628
20629 __extension__ extern __inline uint16x8_t
20630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20631 __arm_vmulltq_int (uint8x16_t __a, uint8x16_t __b)
20632 {
20633 return __arm_vmulltq_int_u8 (__a, __b);
20634 }
20635
20636 __extension__ extern __inline uint16x8_t
20637 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20638 __arm_vmullbq_int (uint8x16_t __a, uint8x16_t __b)
20639 {
20640 return __arm_vmullbq_int_u8 (__a, __b);
20641 }
20642
20643 __extension__ extern __inline uint8x16_t
20644 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20645 __arm_vmulhq (uint8x16_t __a, uint8x16_t __b)
20646 {
20647 return __arm_vmulhq_u8 (__a, __b);
20648 }
20649
20650 __extension__ extern __inline uint32_t
20651 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20652 __arm_vmladavq (uint8x16_t __a, uint8x16_t __b)
20653 {
20654 return __arm_vmladavq_u8 (__a, __b);
20655 }
20656
20657 __extension__ extern __inline uint8_t
20658 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20659 __arm_vminvq (uint8_t __a, uint8x16_t __b)
20660 {
20661 return __arm_vminvq_u8 (__a, __b);
20662 }
20663
20664 __extension__ extern __inline uint8x16_t
20665 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20666 __arm_vminq (uint8x16_t __a, uint8x16_t __b)
20667 {
20668 return __arm_vminq_u8 (__a, __b);
20669 }
20670
20671 __extension__ extern __inline uint8_t
20672 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20673 __arm_vmaxvq (uint8_t __a, uint8x16_t __b)
20674 {
20675 return __arm_vmaxvq_u8 (__a, __b);
20676 }
20677
20678 __extension__ extern __inline uint8x16_t
20679 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20680 __arm_vmaxq (uint8x16_t __a, uint8x16_t __b)
20681 {
20682 return __arm_vmaxq_u8 (__a, __b);
20683 }
20684
20685 __extension__ extern __inline uint8x16_t
20686 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20687 __arm_vhsubq (uint8x16_t __a, uint8x16_t __b)
20688 {
20689 return __arm_vhsubq_u8 (__a, __b);
20690 }
20691
20692 __extension__ extern __inline uint8x16_t
20693 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20694 __arm_vhsubq (uint8x16_t __a, uint8_t __b)
20695 {
20696 return __arm_vhsubq_n_u8 (__a, __b);
20697 }
20698
20699 __extension__ extern __inline uint8x16_t
20700 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20701 __arm_vhaddq (uint8x16_t __a, uint8x16_t __b)
20702 {
20703 return __arm_vhaddq_u8 (__a, __b);
20704 }
20705
20706 __extension__ extern __inline uint8x16_t
20707 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20708 __arm_vhaddq (uint8x16_t __a, uint8_t __b)
20709 {
20710 return __arm_vhaddq_n_u8 (__a, __b);
20711 }
20712
20713 __extension__ extern __inline uint8x16_t
20714 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20715 __arm_veorq (uint8x16_t __a, uint8x16_t __b)
20716 {
20717 return __arm_veorq_u8 (__a, __b);
20718 }
20719
20720 __extension__ extern __inline mve_pred16_t
20721 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20722 __arm_vcmpneq (uint8x16_t __a, uint8_t __b)
20723 {
20724 return __arm_vcmpneq_n_u8 (__a, __b);
20725 }
20726
20727 __extension__ extern __inline mve_pred16_t
20728 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20729 __arm_vcmphiq (uint8x16_t __a, uint8x16_t __b)
20730 {
20731 return __arm_vcmphiq_u8 (__a, __b);
20732 }
20733
20734 __extension__ extern __inline mve_pred16_t
20735 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20736 __arm_vcmphiq (uint8x16_t __a, uint8_t __b)
20737 {
20738 return __arm_vcmphiq_n_u8 (__a, __b);
20739 }
20740
20741 __extension__ extern __inline mve_pred16_t
20742 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20743 __arm_vcmpeqq (uint8x16_t __a, uint8x16_t __b)
20744 {
20745 return __arm_vcmpeqq_u8 (__a, __b);
20746 }
20747
20748 __extension__ extern __inline mve_pred16_t
20749 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20750 __arm_vcmpeqq (uint8x16_t __a, uint8_t __b)
20751 {
20752 return __arm_vcmpeqq_n_u8 (__a, __b);
20753 }
20754
20755 __extension__ extern __inline mve_pred16_t
20756 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20757 __arm_vcmpcsq (uint8x16_t __a, uint8x16_t __b)
20758 {
20759 return __arm_vcmpcsq_u8 (__a, __b);
20760 }
20761
20762 __extension__ extern __inline mve_pred16_t
20763 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20764 __arm_vcmpcsq (uint8x16_t __a, uint8_t __b)
20765 {
20766 return __arm_vcmpcsq_n_u8 (__a, __b);
20767 }
20768
20769 __extension__ extern __inline uint8x16_t
20770 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20771 __arm_vcaddq_rot90 (uint8x16_t __a, uint8x16_t __b)
20772 {
20773 return __arm_vcaddq_rot90_u8 (__a, __b);
20774 }
20775
20776 __extension__ extern __inline uint8x16_t
20777 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20778 __arm_vcaddq_rot270 (uint8x16_t __a, uint8x16_t __b)
20779 {
20780 return __arm_vcaddq_rot270_u8 (__a, __b);
20781 }
20782
20783 __extension__ extern __inline uint8x16_t
20784 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20785 __arm_vbicq (uint8x16_t __a, uint8x16_t __b)
20786 {
20787 return __arm_vbicq_u8 (__a, __b);
20788 }
20789
20790 __extension__ extern __inline uint8x16_t
20791 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20792 __arm_vandq (uint8x16_t __a, uint8x16_t __b)
20793 {
20794 return __arm_vandq_u8 (__a, __b);
20795 }
20796
20797 __extension__ extern __inline uint32_t
20798 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20799 __arm_vaddvq_p (uint8x16_t __a, mve_pred16_t __p)
20800 {
20801 return __arm_vaddvq_p_u8 (__a, __p);
20802 }
20803
20804 __extension__ extern __inline uint32_t
20805 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20806 __arm_vaddvaq (uint32_t __a, uint8x16_t __b)
20807 {
20808 return __arm_vaddvaq_u8 (__a, __b);
20809 }
20810
20811 __extension__ extern __inline uint8x16_t
20812 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20813 __arm_vaddq (uint8x16_t __a, uint8_t __b)
20814 {
20815 return __arm_vaddq_n_u8 (__a, __b);
20816 }
20817
20818 __extension__ extern __inline uint8x16_t
20819 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20820 __arm_vabdq (uint8x16_t __a, uint8x16_t __b)
20821 {
20822 return __arm_vabdq_u8 (__a, __b);
20823 }
20824
20825 __extension__ extern __inline uint8x16_t
20826 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20827 __arm_vshlq_r (uint8x16_t __a, int32_t __b)
20828 {
20829 return __arm_vshlq_r_u8 (__a, __b);
20830 }
20831
20832 __extension__ extern __inline uint8x16_t
20833 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20834 __arm_vrshlq (uint8x16_t __a, int8x16_t __b)
20835 {
20836 return __arm_vrshlq_u8 (__a, __b);
20837 }
20838
20839 __extension__ extern __inline uint8x16_t
20840 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20841 __arm_vrshlq (uint8x16_t __a, int32_t __b)
20842 {
20843 return __arm_vrshlq_n_u8 (__a, __b);
20844 }
20845
20846 __extension__ extern __inline uint8x16_t
20847 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20848 __arm_vqshlq (uint8x16_t __a, int8x16_t __b)
20849 {
20850 return __arm_vqshlq_u8 (__a, __b);
20851 }
20852
20853 __extension__ extern __inline uint8x16_t
20854 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20855 __arm_vqshlq_r (uint8x16_t __a, int32_t __b)
20856 {
20857 return __arm_vqshlq_r_u8 (__a, __b);
20858 }
20859
20860 __extension__ extern __inline uint8x16_t
20861 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20862 __arm_vqrshlq (uint8x16_t __a, int8x16_t __b)
20863 {
20864 return __arm_vqrshlq_u8 (__a, __b);
20865 }
20866
20867 __extension__ extern __inline uint8x16_t
20868 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20869 __arm_vqrshlq (uint8x16_t __a, int32_t __b)
20870 {
20871 return __arm_vqrshlq_n_u8 (__a, __b);
20872 }
20873
20874 __extension__ extern __inline uint8_t
20875 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20876 __arm_vminavq (uint8_t __a, int8x16_t __b)
20877 {
20878 return __arm_vminavq_s8 (__a, __b);
20879 }
20880
20881 __extension__ extern __inline uint8x16_t
20882 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20883 __arm_vminaq (uint8x16_t __a, int8x16_t __b)
20884 {
20885 return __arm_vminaq_s8 (__a, __b);
20886 }
20887
20888 __extension__ extern __inline uint8_t
20889 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20890 __arm_vmaxavq (uint8_t __a, int8x16_t __b)
20891 {
20892 return __arm_vmaxavq_s8 (__a, __b);
20893 }
20894
20895 __extension__ extern __inline uint8x16_t
20896 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20897 __arm_vmaxaq (uint8x16_t __a, int8x16_t __b)
20898 {
20899 return __arm_vmaxaq_s8 (__a, __b);
20900 }
20901
20902 __extension__ extern __inline uint8x16_t
20903 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20904 __arm_vbrsrq (uint8x16_t __a, int32_t __b)
20905 {
20906 return __arm_vbrsrq_n_u8 (__a, __b);
20907 }
20908
20909 __extension__ extern __inline uint8x16_t
20910 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20911 __arm_vshlq_n (uint8x16_t __a, const int __imm)
20912 {
20913 return __arm_vshlq_n_u8 (__a, __imm);
20914 }
20915
20916 __extension__ extern __inline uint8x16_t
20917 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20918 __arm_vrshrq (uint8x16_t __a, const int __imm)
20919 {
20920 return __arm_vrshrq_n_u8 (__a, __imm);
20921 }
20922
20923 __extension__ extern __inline uint8x16_t
20924 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20925 __arm_vqshlq_n (uint8x16_t __a, const int __imm)
20926 {
20927 return __arm_vqshlq_n_u8 (__a, __imm);
20928 }
20929
20930 __extension__ extern __inline mve_pred16_t
20931 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20932 __arm_vcmpneq (int8x16_t __a, int8_t __b)
20933 {
20934 return __arm_vcmpneq_n_s8 (__a, __b);
20935 }
20936
20937 __extension__ extern __inline mve_pred16_t
20938 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20939 __arm_vcmpltq (int8x16_t __a, int8x16_t __b)
20940 {
20941 return __arm_vcmpltq_s8 (__a, __b);
20942 }
20943
20944 __extension__ extern __inline mve_pred16_t
20945 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20946 __arm_vcmpltq (int8x16_t __a, int8_t __b)
20947 {
20948 return __arm_vcmpltq_n_s8 (__a, __b);
20949 }
20950
20951 __extension__ extern __inline mve_pred16_t
20952 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20953 __arm_vcmpleq (int8x16_t __a, int8x16_t __b)
20954 {
20955 return __arm_vcmpleq_s8 (__a, __b);
20956 }
20957
20958 __extension__ extern __inline mve_pred16_t
20959 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20960 __arm_vcmpleq (int8x16_t __a, int8_t __b)
20961 {
20962 return __arm_vcmpleq_n_s8 (__a, __b);
20963 }
20964
20965 __extension__ extern __inline mve_pred16_t
20966 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20967 __arm_vcmpgtq (int8x16_t __a, int8x16_t __b)
20968 {
20969 return __arm_vcmpgtq_s8 (__a, __b);
20970 }
20971
20972 __extension__ extern __inline mve_pred16_t
20973 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20974 __arm_vcmpgtq (int8x16_t __a, int8_t __b)
20975 {
20976 return __arm_vcmpgtq_n_s8 (__a, __b);
20977 }
20978
20979 __extension__ extern __inline mve_pred16_t
20980 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20981 __arm_vcmpgeq (int8x16_t __a, int8x16_t __b)
20982 {
20983 return __arm_vcmpgeq_s8 (__a, __b);
20984 }
20985
20986 __extension__ extern __inline mve_pred16_t
20987 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20988 __arm_vcmpgeq (int8x16_t __a, int8_t __b)
20989 {
20990 return __arm_vcmpgeq_n_s8 (__a, __b);
20991 }
20992
20993 __extension__ extern __inline mve_pred16_t
20994 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20995 __arm_vcmpeqq (int8x16_t __a, int8x16_t __b)
20996 {
20997 return __arm_vcmpeqq_s8 (__a, __b);
20998 }
20999
21000 __extension__ extern __inline mve_pred16_t
21001 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21002 __arm_vcmpeqq (int8x16_t __a, int8_t __b)
21003 {
21004 return __arm_vcmpeqq_n_s8 (__a, __b);
21005 }
21006
21007 __extension__ extern __inline uint8x16_t
21008 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21009 __arm_vqshluq (int8x16_t __a, const int __imm)
21010 {
21011 return __arm_vqshluq_n_s8 (__a, __imm);
21012 }
21013
21014 __extension__ extern __inline int32_t
21015 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21016 __arm_vaddvq_p (int8x16_t __a, mve_pred16_t __p)
21017 {
21018 return __arm_vaddvq_p_s8 (__a, __p);
21019 }
21020
21021 __extension__ extern __inline int8x16_t
21022 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21023 __arm_vsubq (int8x16_t __a, int8x16_t __b)
21024 {
21025 return __arm_vsubq_s8 (__a, __b);
21026 }
21027
21028 __extension__ extern __inline int8x16_t
21029 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21030 __arm_vsubq (int8x16_t __a, int8_t __b)
21031 {
21032 return __arm_vsubq_n_s8 (__a, __b);
21033 }
21034
21035 __extension__ extern __inline int8x16_t
21036 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21037 __arm_vshlq_r (int8x16_t __a, int32_t __b)
21038 {
21039 return __arm_vshlq_r_s8 (__a, __b);
21040 }
21041
21042 __extension__ extern __inline int8x16_t
21043 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21044 __arm_vrshlq (int8x16_t __a, int8x16_t __b)
21045 {
21046 return __arm_vrshlq_s8 (__a, __b);
21047 }
21048
21049 __extension__ extern __inline int8x16_t
21050 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21051 __arm_vrshlq (int8x16_t __a, int32_t __b)
21052 {
21053 return __arm_vrshlq_n_s8 (__a, __b);
21054 }
21055
21056 __extension__ extern __inline int8x16_t
21057 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21058 __arm_vrmulhq (int8x16_t __a, int8x16_t __b)
21059 {
21060 return __arm_vrmulhq_s8 (__a, __b);
21061 }
21062
21063 __extension__ extern __inline int8x16_t
21064 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21065 __arm_vrhaddq (int8x16_t __a, int8x16_t __b)
21066 {
21067 return __arm_vrhaddq_s8 (__a, __b);
21068 }
21069
21070 __extension__ extern __inline int8x16_t
21071 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21072 __arm_vqsubq (int8x16_t __a, int8x16_t __b)
21073 {
21074 return __arm_vqsubq_s8 (__a, __b);
21075 }
21076
21077 __extension__ extern __inline int8x16_t
21078 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21079 __arm_vqsubq (int8x16_t __a, int8_t __b)
21080 {
21081 return __arm_vqsubq_n_s8 (__a, __b);
21082 }
21083
21084 __extension__ extern __inline int8x16_t
21085 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21086 __arm_vqshlq (int8x16_t __a, int8x16_t __b)
21087 {
21088 return __arm_vqshlq_s8 (__a, __b);
21089 }
21090
/* Polymorphic wrappers for the int8x16_t (s8) MVE intrinsics.  Each
   always-inline forwarder resolves the overloaded __arm_* name to the
   type-suffixed _s8 builtin; overloads taking a scalar second operand
   map to the _n_s8 ("by scalar") form, and the const-int overloads at
   the end map to the immediate-shift _n_s8 builtins.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r (int8x16_t __a, int32_t __b)
{
  return __arm_vqshlq_r_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vqrshlq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (int8x16_t __a, int32_t __b)
{
  return __arm_vqrshlq_n_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vqrdmulhq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int8x16_t __a, int8_t __b)
{
  return __arm_vqrdmulhq_n_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vqdmulhq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq (int8x16_t __a, int8_t __b)
{
  return __arm_vqdmulhq_n_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vqaddq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (int8x16_t __a, int8_t __b)
{
  return __arm_vqaddq_n_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vorrq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vornq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmulq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (int8x16_t __a, int8_t __b)
{
  return __arm_vmulq_n_s8 (__a, __b);
}

/* Widening multiplies: result element width is doubled (s8 -> s16).  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmulltq_int_s8 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmullbq_int_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmulhq_s8 (__a, __b);
}

/* Dot-product and min/max reductions: scalar result/accumulator.  */

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmlsdavxq_s8 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmlsdavq_s8 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmladavxq_s8 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmladavq_s8 (__a, __b);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq (int8_t __a, int8x16_t __b)
{
  return __arm_vminvq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vminq_s8 (__a, __b);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq (int8_t __a, int8x16_t __b)
{
  return __arm_vmaxvq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vmaxq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vhsubq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (int8x16_t __a, int8_t __b)
{
  return __arm_vhsubq_n_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90 (int8x16_t __a, int8x16_t __b)
{
  return __arm_vhcaddq_rot90_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270 (int8x16_t __a, int8x16_t __b)
{
  return __arm_vhcaddq_rot270_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vhaddq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (int8x16_t __a, int8_t __b)
{
  return __arm_vhaddq_n_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq (int8x16_t __a, int8x16_t __b)
{
  return __arm_veorq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90 (int8x16_t __a, int8x16_t __b)
{
  return __arm_vcaddq_rot90_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270 (int8x16_t __a, int8x16_t __b)
{
  return __arm_vcaddq_rot270_s8 (__a, __b);
}

/* vbrsrq has no vector-by-vector form; the only overload is _n.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq (int8x16_t __a, int32_t __b)
{
  return __arm_vbrsrq_n_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vbicq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vandq_s8 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq (int32_t __a, int8x16_t __b)
{
  return __arm_vaddvaq_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (int8x16_t __a, int8_t __b)
{
  return __arm_vaddq_n_s8 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq (int8x16_t __a, int8x16_t __b)
{
  return __arm_vabdq_s8 (__a, __b);
}

/* Immediate-shift overloads: __imm must be a compile-time constant.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n (int8x16_t __a, const int __imm)
{
  return __arm_vshlq_n_s8 (__a, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq (int8x16_t __a, const int __imm)
{
  return __arm_vrshrq_n_s8 (__a, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n (int8x16_t __a, const int __imm)
{
  return __arm_vqshlq_n_s8 (__a, __imm);
}
21384
/* Polymorphic wrappers for the uint16x8_t (u16) MVE intrinsics.  The
   same pattern as the signed groups: each inline forwards to the _u16
   builtin, scalar-operand overloads forward to _n_u16.  Comparisons
   return an mve_pred16_t lane-predicate mask.  Note the shift-amount
   operands are signed (int16x8_t / int32_t) even for unsigned data,
   and the vmina/vmaxa/vminav/vmaxav family takes a SIGNED vector
   operand with an unsigned accumulator, matching the s16 builtins.  */

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vsubq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vsubq_n_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vrmulhq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vrhaddq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vqsubq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vqsubq_n_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vqaddq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vqaddq_n_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vorrq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vornq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmulq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vmulq_n_u16 (__a, __b);
}

/* Widening multiplies: u16 -> u32 result lanes.  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmulltq_int_u16 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmullbq_int_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmulhq_u16 (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmladavq_u16 (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq (uint16_t __a, uint16x8_t __b)
{
  return __arm_vminvq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vminq_u16 (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq (uint16_t __a, uint16x8_t __b)
{
  return __arm_vmaxvq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmaxq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vhsubq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vhsubq_n_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vhaddq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vhaddq_n_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_veorq_u16 (__a, __b);
}

/* Unsigned comparisons (ne/hi/eq/cs), vector and vector-vs-scalar.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vcmpneq_n_u16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vcmphiq_u16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vcmphiq_n_u16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vcmpeqq_u16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vcmpeqq_n_u16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vcmpcsq_u16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vcmpcsq_n_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90 (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vcaddq_rot90_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270 (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vcaddq_rot270_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vbicq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vandq_u16 (__a, __b);
}

/* Predicated add-across: lanes whose predicate bit is clear are
   excluded from the sum.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p (uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vaddvq_p_u16 (__a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq (uint32_t __a, uint16x8_t __b)
{
  return __arm_vaddvaq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (uint16x8_t __a, uint16_t __b)
{
  return __arm_vaddq_n_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vabdq_u16 (__a, __b);
}

/* Shifts by register/vector: shift amounts are signed types.  */

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r (uint16x8_t __a, int32_t __b)
{
  return __arm_vshlq_r_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (uint16x8_t __a, int16x8_t __b)
{
  return __arm_vrshlq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (uint16x8_t __a, int32_t __b)
{
  return __arm_vrshlq_n_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq (uint16x8_t __a, int16x8_t __b)
{
  return __arm_vqshlq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r (uint16x8_t __a, int32_t __b)
{
  return __arm_vqshlq_r_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (uint16x8_t __a, int16x8_t __b)
{
  return __arm_vqrshlq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (uint16x8_t __a, int32_t __b)
{
  return __arm_vqrshlq_n_u16 (__a, __b);
}

/* Absolute min/max reductions: signed s16 input, unsigned result, so
   the overload keys on the unsigned first operand and forwards to the
   _s16 builtin.  */

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq (uint16_t __a, int16x8_t __b)
{
  return __arm_vminavq_s16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq (uint16x8_t __a, int16x8_t __b)
{
  return __arm_vminaq_s16 (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq (uint16_t __a, int16x8_t __b)
{
  return __arm_vmaxavq_s16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq (uint16x8_t __a, int16x8_t __b)
{
  return __arm_vmaxaq_s16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq (uint16x8_t __a, int32_t __b)
{
  return __arm_vbrsrq_n_u16 (__a, __b);
}

/* Immediate-shift overloads: __imm must be a compile-time constant.  */

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n (uint16x8_t __a, const int __imm)
{
  return __arm_vshlq_n_u16 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq (uint16x8_t __a, const int __imm)
{
  return __arm_vrshrq_n_u16 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n (uint16x8_t __a, const int __imm)
{
  return __arm_vqshlq_n_u16 (__a, __imm);
}
21769
/* Signed int16x8_t (s16) comparison wrappers: each returns an
   mve_pred16_t lane-predicate mask; the scalar-operand overloads
   forward to the _n_s16 builtins.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (int16x8_t __a, int16_t __b)
{
  return __arm_vcmpneq_n_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vcmpltq_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (int16x8_t __a, int16_t __b)
{
  return __arm_vcmpltq_n_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vcmpleq_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (int16x8_t __a, int16_t __b)
{
  return __arm_vcmpleq_n_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vcmpgtq_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (int16x8_t __a, int16_t __b)
{
  return __arm_vcmpgtq_n_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vcmpgeq_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (int16x8_t __a, int16_t __b)
{
  return __arm_vcmpgeq_n_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vcmpeqq_s16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (int16x8_t __a, int16_t __b)
{
  return __arm_vcmpeqq_n_s16 (__a, __b);
}

/* Saturating shift-left-unsigned: signed input, unsigned result;
   __imm must be a compile-time constant.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq (int16x8_t __a, const int __imm)
{
  return __arm_vqshluq_n_s16 (__a, __imm);
}
21853
/* Polymorphic wrappers for the int16x8_t (s16) MVE arithmetic
   intrinsics: each inline forwards to the _s16 builtin, with
   scalar-operand overloads mapping to the _n_s16 form.  */

/* Predicated add-across: lanes whose predicate bit is clear are
   excluded from the sum.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vaddvq_p_s16 (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vsubq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (int16x8_t __a, int16_t __b)
{
  return __arm_vsubq_n_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r (int16x8_t __a, int32_t __b)
{
  return __arm_vshlq_r_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vrshlq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (int16x8_t __a, int32_t __b)
{
  return __arm_vrshlq_n_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vrmulhq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vrhaddq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqsubq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (int16x8_t __a, int16_t __b)
{
  return __arm_vqsubq_n_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqshlq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r (int16x8_t __a, int32_t __b)
{
  return __arm_vqshlq_r_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqrshlq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (int16x8_t __a, int32_t __b)
{
  return __arm_vqrshlq_n_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqrdmulhq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int16x8_t __a, int16_t __b)
{
  return __arm_vqrdmulhq_n_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqdmulhq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq (int16x8_t __a, int16_t __b)
{
  return __arm_vqdmulhq_n_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqaddq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (int16x8_t __a, int16_t __b)
{
  return __arm_vqaddq_n_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vorrq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vornq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmulq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (int16x8_t __a, int16_t __b)
{
  return __arm_vmulq_n_s16 (__a, __b);
}

/* Widening multiplies: s16 -> s32 result lanes.  */

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmulltq_int_s16 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmullbq_int_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmulhq_s16 (__a, __b);
}

/* Dot-product and min/max reductions: scalar result/accumulator.  */

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmlsdavxq_s16 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmlsdavq_s16 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmladavxq_s16 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmladavq_s16 (__a, __b);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq (int16_t __a, int16x8_t __b)
{
  return __arm_vminvq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vminq_s16 (__a, __b);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq (int16_t __a, int16x8_t __b)
{
  return __arm_vmaxvq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmaxq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vhsubq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (int16x8_t __a, int16_t __b)
{
  return __arm_vhsubq_n_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90 (int16x8_t __a, int16x8_t __b)
{
  return __arm_vhcaddq_rot90_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270 (int16x8_t __a, int16x8_t __b)
{
  return __arm_vhcaddq_rot270_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vhaddq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (int16x8_t __a, int16_t __b)
{
  return __arm_vhaddq_n_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq (int16x8_t __a, int16x8_t __b)
{
  return __arm_veorq_s16 (__a, __b);
}
22147
22148 __extension__ extern __inline int16x8_t
22149 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22150 __arm_vcaddq_rot90 (int16x8_t __a, int16x8_t __b)
22151 {
22152 return __arm_vcaddq_rot90_s16 (__a, __b);
22153 }
22154
22155 __extension__ extern __inline int16x8_t
22156 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22157 __arm_vcaddq_rot270 (int16x8_t __a, int16x8_t __b)
22158 {
22159 return __arm_vcaddq_rot270_s16 (__a, __b);
22160 }
22161
22162 __extension__ extern __inline int16x8_t
22163 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22164 __arm_vbrsrq (int16x8_t __a, int32_t __b)
22165 {
22166 return __arm_vbrsrq_n_s16 (__a, __b);
22167 }
22168
22169 __extension__ extern __inline int16x8_t
22170 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22171 __arm_vbicq (int16x8_t __a, int16x8_t __b)
22172 {
22173 return __arm_vbicq_s16 (__a, __b);
22174 }
22175
22176 __extension__ extern __inline int16x8_t
22177 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22178 __arm_vandq (int16x8_t __a, int16x8_t __b)
22179 {
22180 return __arm_vandq_s16 (__a, __b);
22181 }
22182
22183 __extension__ extern __inline int32_t
22184 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22185 __arm_vaddvaq (int32_t __a, int16x8_t __b)
22186 {
22187 return __arm_vaddvaq_s16 (__a, __b);
22188 }
22189
22190 __extension__ extern __inline int16x8_t
22191 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22192 __arm_vaddq (int16x8_t __a, int16_t __b)
22193 {
22194 return __arm_vaddq_n_s16 (__a, __b);
22195 }
22196
22197 __extension__ extern __inline int16x8_t
22198 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22199 __arm_vabdq (int16x8_t __a, int16x8_t __b)
22200 {
22201 return __arm_vabdq_s16 (__a, __b);
22202 }
22203
22204 __extension__ extern __inline int16x8_t
22205 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22206 __arm_vshlq_n (int16x8_t __a, const int __imm)
22207 {
22208 return __arm_vshlq_n_s16 (__a, __imm);
22209 }
22210
22211 __extension__ extern __inline int16x8_t
22212 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22213 __arm_vrshrq (int16x8_t __a, const int __imm)
22214 {
22215 return __arm_vrshrq_n_s16 (__a, __imm);
22216 }
22217
22218 __extension__ extern __inline int16x8_t
22219 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22220 __arm_vqshlq_n (int16x8_t __a, const int __imm)
22221 {
22222 return __arm_vqshlq_n_s16 (__a, __imm);
22223 }
22224
/* Generic (polymorphic) __arm_* wrappers for the uint32x4_t element type.
   Each function forwards to the _u32-suffixed intrinsic; overloads whose
   second operand is a scalar forward to the _n_ form.  Vector compares
   (vcmp*) return an mve_pred16_t predicate mask.  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vsubq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vsubq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vrmulhq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vrhaddq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vqsubq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vqsubq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vqaddq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vqaddq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vorrq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vornq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vmulq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vmulq_n_u32 (__a, __b);
}

/* Widening integer multiplies: u32 lanes produce a uint64x2_t result.  */
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vmulltq_int_u32 (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vmullbq_int_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vmulhq_u32 (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vmladavq_u32 (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq (uint32_t __a, uint32x4_t __b)
{
  return __arm_vminvq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vminq_u32 (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq (uint32_t __a, uint32x4_t __b)
{
  return __arm_vmaxvq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vmaxq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vhsubq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vhsubq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vhaddq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vhaddq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_veorq_u32 (__a, __b);
}

/* Compares return a per-lane predicate packed into mve_pred16_t.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vcmpneq_n_u32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vcmphiq_u32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vcmphiq_n_u32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vcmpeqq_u32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vcmpeqq_n_u32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vcmpcsq_u32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vcmpcsq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90 (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vcaddq_rot90_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270 (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vcaddq_rot270_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vbicq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vandq_u32 (__a, __b);
}

/* Predicated across-vector add: only lanes selected by __p participate.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p (uint32x4_t __a, mve_pred16_t __p)
{
  return __arm_vaddvq_p_u32 (__a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq (uint32_t __a, uint32x4_t __b)
{
  return __arm_vaddvaq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (uint32x4_t __a, uint32_t __b)
{
  return __arm_vaddq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vabdq_u32 (__a, __b);
}

/* Shift-by-register forms: the count is a signed scalar (int32_t), even
   for unsigned element types.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r (uint32x4_t __a, int32_t __b)
{
  return __arm_vshlq_r_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (uint32x4_t __a, int32x4_t __b)
{
  return __arm_vrshlq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (uint32x4_t __a, int32_t __b)
{
  return __arm_vrshlq_n_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq (uint32x4_t __a, int32x4_t __b)
{
  return __arm_vqshlq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r (uint32x4_t __a, int32_t __b)
{
  return __arm_vqshlq_r_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (uint32x4_t __a, int32x4_t __b)
{
  return __arm_vqrshlq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (uint32x4_t __a, int32_t __b)
{
  return __arm_vqrshlq_n_u32 (__a, __b);
}

/* The vmin/maxa family takes signed s32 vector input but an unsigned
   accumulator/result, hence the mixed signedness and the _s32 callee.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq (uint32_t __a, int32x4_t __b)
{
  return __arm_vminavq_s32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq (uint32x4_t __a, int32x4_t __b)
{
  return __arm_vminaq_s32 (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq (uint32_t __a, int32x4_t __b)
{
  return __arm_vmaxavq_s32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq (uint32x4_t __a, int32x4_t __b)
{
  return __arm_vmaxaq_s32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq (uint32x4_t __a, int32_t __b)
{
  return __arm_vbrsrq_n_u32 (__a, __b);
}

/* Immediate-count shift forms: the count is a compile-time constant.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n (uint32x4_t __a, const int __imm)
{
  return __arm_vshlq_n_u32 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq (uint32x4_t __a, const int __imm)
{
  return __arm_vrshrq_n_u32 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n (uint32x4_t __a, const int __imm)
{
  return __arm_vqshlq_n_u32 (__a, __imm);
}
22609
/* Generic (polymorphic) __arm_* wrappers for the int32x4_t element type.
   Each function forwards to the _s32-suffixed intrinsic; overloads whose
   second operand is a scalar forward to the _n_ form.  Vector compares
   (vcmp*) return an mve_pred16_t predicate mask.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (int32x4_t __a, int32_t __b)
{
  return __arm_vcmpneq_n_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vcmpltq_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (int32x4_t __a, int32_t __b)
{
  return __arm_vcmpltq_n_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vcmpleq_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (int32x4_t __a, int32_t __b)
{
  return __arm_vcmpleq_n_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vcmpgtq_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (int32x4_t __a, int32_t __b)
{
  return __arm_vcmpgtq_n_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vcmpgeq_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (int32x4_t __a, int32_t __b)
{
  return __arm_vcmpgeq_n_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vcmpeqq_s32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (int32x4_t __a, int32_t __b)
{
  return __arm_vcmpeqq_n_s32 (__a, __b);
}

/* Saturating shift-left-unsigned: signed input, unsigned result vector.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshluq (int32x4_t __a, const int __imm)
{
  return __arm_vqshluq_n_s32 (__a, __imm);
}

/* Predicated across-vector add: only lanes selected by __p participate.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvq_p (int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vaddvq_p_s32 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vsubq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (int32x4_t __a, int32_t __b)
{
  return __arm_vsubq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_r (int32x4_t __a, int32_t __b)
{
  return __arm_vshlq_r_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vrshlq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq (int32x4_t __a, int32_t __b)
{
  return __arm_vrshlq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vrmulhq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vrhaddq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqsubq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq (int32x4_t __a, int32_t __b)
{
  return __arm_vqsubq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqshlq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_r (int32x4_t __a, int32_t __b)
{
  return __arm_vqshlq_r_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqrshlq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq (int32x4_t __a, int32_t __b)
{
  return __arm_vqrshlq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqrdmulhq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmulhq (int32x4_t __a, int32_t __b)
{
  return __arm_vqrdmulhq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqdmulhq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulhq (int32x4_t __a, int32_t __b)
{
  return __arm_vqdmulhq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqaddq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqaddq (int32x4_t __a, int32_t __b)
{
  return __arm_vqaddq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vorrq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vornq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmulq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (int32x4_t __a, int32_t __b)
{
  return __arm_vmulq_n_s32 (__a, __b);
}

/* Widening integer multiplies: s32 lanes produce an int64x2_t result.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmulltq_int_s32 (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmullbq_int_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmulhq_s32 (__a, __b);
}

/* The vml[s|a]dav[x]q reductions return a scalar int32_t result.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmlsdavxq_s32 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmlsdavq_s32 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmladavxq_s32 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmladavq_s32 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq (int32_t __a, int32x4_t __b)
{
  return __arm_vminvq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vminq_s32 (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq (int32_t __a, int32x4_t __b)
{
  return __arm_vmaxvq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmaxq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vhsubq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq (int32x4_t __a, int32_t __b)
{
  return __arm_vhsubq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90 (int32x4_t __a, int32x4_t __b)
{
  return __arm_vhcaddq_rot90_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270 (int32x4_t __a, int32x4_t __b)
{
  return __arm_vhcaddq_rot270_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vhaddq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq (int32x4_t __a, int32_t __b)
{
  return __arm_vhaddq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq (int32x4_t __a, int32x4_t __b)
{
  return __arm_veorq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90 (int32x4_t __a, int32x4_t __b)
{
  return __arm_vcaddq_rot90_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270 (int32x4_t __a, int32x4_t __b)
{
  return __arm_vcaddq_rot270_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq (int32x4_t __a, int32_t __b)
{
  return __arm_vbrsrq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vbicq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vandq_s32 (__a, __b);
}

/* Accumulating across-vector add: scalar accumulator in, scalar out.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq (int32_t __a, int32x4_t __b)
{
  return __arm_vaddvaq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (int32x4_t __a, int32_t __b)
{
  return __arm_vaddq_n_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vabdq_s32 (__a, __b);
}

/* Immediate-count shift forms: the count is a compile-time constant.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_n (int32x4_t __a, const int __imm)
{
  return __arm_vshlq_n_s32 (__a, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq (int32x4_t __a, const int __imm)
{
  return __arm_vrshrq_n_s32 (__a, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_n (int32x4_t __a, const int __imm)
{
  return __arm_vqshlq_n_s32 (__a, __imm);
}
23064
/* Overloads keyed on uint16x8_t/uint8x16_t operands: u16->u8 narrowing
   moves, polynomial (p8) widening multiplies, u8 widening shifts, and
   immediate ORR/BIC on uint16x8_t; each forwards to its suffixed form.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq (uint8x16_t __a, uint16x8_t __b)
{
  return __arm_vqmovntq_u16 (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq (uint8x16_t __a, uint16x8_t __b)
{
  return __arm_vqmovnbq_u16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly (uint8x16_t __a, uint8x16_t __b)
{
  return __arm_vmulltq_poly_p8 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly (uint8x16_t __a, uint8x16_t __b)
{
  return __arm_vmullbq_poly_p8 (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq (uint8x16_t __a, uint16x8_t __b)
{
  return __arm_vmovntq_u16 (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq (uint8x16_t __a, uint16x8_t __b)
{
  return __arm_vmovnbq_u16 (__a, __b);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmlaldavq_u16 (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovuntq (uint8x16_t __a, int16x8_t __b)
{
  return __arm_vqmovuntq_s16 (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovunbq (uint8x16_t __a, int16x8_t __b)
{
  return __arm_vqmovunbq_s16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq (uint8x16_t __a, const int __imm)
{
  return __arm_vshlltq_n_u8 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq (uint8x16_t __a, const int __imm)
{
  return __arm_vshllbq_n_u8 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (uint16x8_t __a, const int __imm)
{
  return __arm_vorrq_n_u16 (__a, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (uint16x8_t __a, const int __imm)
{
  return __arm_vbicq_n_u16 (__a, __imm);
}
23155
/* Overloads keyed on int16x8_t/int8x16_t operands: s16->s8 narrowing moves,
   saturating doubling widening multiplies (vector and scalar forms),
   long multiply-accumulate/subtract reductions, s8 widening shifts, and
   immediate ORR/BIC on int16x8_t.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq (int8x16_t __a, int16x8_t __b)
{
  return __arm_vqmovntq_s16 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq (int8x16_t __a, int16x8_t __b)
{
  return __arm_vqmovnbq_s16 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqdmulltq_s16 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq (int16x8_t __a, int16_t __b)
{
  return __arm_vqdmulltq_n_s16 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vqdmullbq_s16 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq (int16x8_t __a, int16_t __b)
{
  return __arm_vqdmullbq_n_s16 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq (int8x16_t __a, int16x8_t __b)
{
  return __arm_vmovntq_s16 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq (int8x16_t __a, int16x8_t __b)
{
  return __arm_vmovnbq_s16 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavxq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmlsldavxq_s16 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmlsldavq_s16 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavxq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmlaldavxq_s16 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq (int16x8_t __a, int16x8_t __b)
{
  return __arm_vmlaldavq_s16 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq (int8x16_t __a, const int __imm)
{
  return __arm_vshlltq_n_s8 (__a, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq (int8x16_t __a, const int __imm)
{
  return __arm_vshllbq_n_s8 (__a, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (int16x8_t __a, const int __imm)
{
  return __arm_vorrq_n_s16 (__a, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (int16x8_t __a, const int __imm)
{
  return __arm_vbicq_n_s16 (__a, __imm);
}
23267
/* Overloads keyed on uint32x4_t/uint16x8_t operands: u32->u16 narrowing
   moves, polynomial (p16) widening multiplies, u16 widening shifts, and
   immediate ORR/BIC on uint32x4_t.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq (uint16x8_t __a, uint32x4_t __b)
{
  return __arm_vqmovntq_u32 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq (uint16x8_t __a, uint32x4_t __b)
{
  return __arm_vqmovnbq_u32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmulltq_poly_p16 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly (uint16x8_t __a, uint16x8_t __b)
{
  return __arm_vmullbq_poly_p16 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq (uint16x8_t __a, uint32x4_t __b)
{
  return __arm_vmovntq_u32 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq (uint16x8_t __a, uint32x4_t __b)
{
  return __arm_vmovnbq_u32 (__a, __b);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vmlaldavq_u32 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovuntq (uint16x8_t __a, int32x4_t __b)
{
  return __arm_vqmovuntq_s32 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovunbq (uint16x8_t __a, int32x4_t __b)
{
  return __arm_vqmovunbq_s32 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq (uint16x8_t __a, const int __imm)
{
  return __arm_vshlltq_n_u16 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq (uint16x8_t __a, const int __imm)
{
  return __arm_vshllbq_n_u16 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (uint32x4_t __a, const int __imm)
{
  return __arm_vorrq_n_u32 (__a, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (uint32x4_t __a, const int __imm)
{
  return __arm_vbicq_n_u32 (__a, __imm);
}
23358
/* Overloads keyed on int32x4_t/int16x8_t operands: s32->s16 narrowing
   moves, saturating doubling widening multiplies to int64x2_t (vector and
   scalar forms), long multiply-accumulate/subtract reductions, s16 widening
   shifts, and immediate ORR/BIC on int32x4_t.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovntq (int16x8_t __a, int32x4_t __b)
{
  return __arm_vqmovntq_s32 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqmovnbq (int16x8_t __a, int32x4_t __b)
{
  return __arm_vqmovnbq_s32 (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqdmulltq_s32 (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq (int32x4_t __a, int32_t __b)
{
  return __arm_vqdmulltq_n_s32 (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vqdmullbq_s32 (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq (int32x4_t __a, int32_t __b)
{
  return __arm_vqdmullbq_n_s32 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovntq (int16x8_t __a, int32x4_t __b)
{
  return __arm_vmovntq_s32 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovnbq (int16x8_t __a, int32x4_t __b)
{
  return __arm_vmovnbq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavxq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmlsldavxq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmlsldavq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavxq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmlaldavxq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vmlaldavq_s32 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq (int16x8_t __a, const int __imm)
{
  return __arm_vshlltq_n_s16 (__a, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq (int16x8_t __a, const int __imm)
{
  return __arm_vshllbq_n_s16 (__a, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (int32x4_t __a, const int __imm)
{
  return __arm_vorrq_n_s32 (__a, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (int32x4_t __a, const int __imm)
{
  return __arm_vbicq_n_s32 (__a, __imm);
}
23470
/* 64-bit long reductions over 32-bit element vectors: rounding high-half
   multiply-accumulate/subtract reductions and long add-across-vector
   accumulate, for both signed and unsigned element types.  */
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhq (uint32x4_t __a, uint32x4_t __b)
{
  return __arm_vrmlaldavhq_u32 (__a, __b);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvaq (uint64_t __a, uint32x4_t __b)
{
  return __arm_vaddlvaq_u32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhxq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vrmlsldavhxq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vrmlsldavhq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhxq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vrmlaldavhxq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhq (int32x4_t __a, int32x4_t __b)
{
  return __arm_vrmlaldavhq_s32 (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvaq (int64_t __a, int32x4_t __b)
{
  return __arm_vaddlvaq_s32 (__a, __b);
}
23519
/* vabavq overloads (absolute-difference-and-accumulate across vector):
   one per element type; the uint32_t accumulator is shared by all.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq (uint32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __arm_vabavq_s8 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq (uint32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __arm_vabavq_s16 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq (uint32_t __a, int32x4_t __b, int32x4_t __c)
{
  return __arm_vabavq_s32 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return __arm_vabavq_u8 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __arm_vabavq_u16 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __arm_vabavq_u32 (__a, __b, __c);
}
23561
/* vbicq_m_n overloads: predicated bit-clear with an immediate, per
   16/32-bit element type.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vbicq_m_n_s16 (__a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vbicq_m_n_s32 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vbicq_m_n_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vbicq_m_n_u32 (__a, __imm, __p);
}
23589
/* Saturating rounding narrowing right shifts (bottom half): vqrshrnbq for
   same-signedness narrowing, vqrshrunbq for signed-to-unsigned.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq (int8x16_t __a, int16x8_t __b, const int __imm)
{
  return __arm_vqrshrnbq_n_s16 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __arm_vqrshrnbq_n_u16 (__a, __b, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __arm_vqrshrnbq_n_s32 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __arm_vqrshrnbq_n_u32 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrunbq (uint8x16_t __a, int16x8_t __b, const int __imm)
{
  return __arm_vqrshrunbq_n_s16 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrunbq (uint16x8_t __a, int32x4_t __b, const int __imm)
{
  return __arm_vqrshrunbq_n_s32 (__a, __b, __imm);
}
23631
/* vrmlaldavhaq overloads: rounding high-half long multiply-accumulate
   reduction with a 64-bit running accumulator (signed and unsigned).  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaq (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __arm_vrmlaldavhaq_s32 (__a, __b, __c);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaq (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __arm_vrmlaldavhaq_u32 (__a, __b, __c);
}
23645
/* vshlcq overloads: whole-vector shift left with carry; __b points to the
   32-bit carry word that is both read and updated.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq (int8x16_t __a, uint32_t * __b, const int __imm)
{
  return __arm_vshlcq_s8 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq (uint8x16_t __a, uint32_t * __b, const int __imm)
{
  return __arm_vshlcq_u8 (__a, __b, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq (int16x8_t __a, uint32_t * __b, const int __imm)
{
  return __arm_vshlcq_s16 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq (uint16x8_t __a, uint32_t * __b, const int __imm)
{
  return __arm_vshlcq_u16 (__a, __b, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq (int32x4_t __a, uint32_t * __b, const int __imm)
{
  return __arm_vshlcq_s32 (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq (uint32x4_t __a, uint32_t * __b, const int __imm)
{
  return __arm_vshlcq_u32 (__a, __b, __imm);
}
23687
/* vpselq overloads: per-lane select between __a and __b under predicate
   __p, for 8-bit element types.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_u8 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_s8 (__a, __b, __p);
}
23701
/* uint8x16_t group: predicated 64-bit-reversal and bitwise-NOT (lanes with
   a false predicate take __inactive), plus scalar multiply-accumulate
   forms vmlasq/vmlaq.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_m_u8 (__inactive, __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_m_u8 (__inactive, __a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
{
  return __arm_vmlasq_n_u8 (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
{
  return __arm_vmlaq_n_u8 (__a, __b, __c);
}
23729
/* uint8x16_t group: predicated/accumulating multiply-add reductions,
   predicated min/max across-vector, and predicated scalar duplicate.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmladavq_p_u8 (__a, __b, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return __arm_vmladavaq_u8 (__a, __b, __c);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminvq_p_u8 (__a, __b, __p);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxvq_p_u8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m (uint8x16_t __inactive, uint8_t __a, mve_pred16_t __p)
{
  return __arm_vdupq_m_n_u8 (__inactive, __a, __p);
}
23764
/* Predicated uint8x16_t comparisons (NE, HI, EQ, CS) returning a
   predicate; each comes in vector-vector and vector-scalar (_n_) form.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_u8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_n_u8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmphiq_m_u8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vcmphiq_m_n_u8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_u8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_n_u8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpcsq_m_u8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpcsq_m_n_u8 (__a, __b, __p);
}
23820
/* uint8x16_t group: predicated count-leading-zeros, predicated
   add-across-vector accumulate, and shift-right/left insert (vsriq/vsliq)
   with an immediate.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_m_u8 (__inactive, __a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p (uint32_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vaddvaq_p_u8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq (uint8x16_t __a, uint8x16_t __b, const int __imm)
{
  return __arm_vsriq_n_u8 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq (uint8x16_t __a, uint8x16_t __b, const int __imm)
{
  return __arm_vsliq_n_u8 (__a, __b, __imm);
}
23848
/* Predicated uint8x16_t shifts by a signed 32-bit register amount:
   plain, rounding, saturating, and rounding-saturating variants.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_m_r_u8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_n_u8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_r_u8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_n_u8 (__a, __b, __p);
}
23876
/* Absolute min/max of signed int8x16_t against unsigned operands:
   predicated across-vector forms (vminavq_p/vmaxavq_p) and predicated
   element-wise forms (vminaq_m/vmaxaq_m).  */
__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminavq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_m (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminaq_m_s8 (__a, __b, __p);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_p (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxavq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_m (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxaq_m_s8 (__a, __b, __p);
}
23904
/* Predicated int8x16_t comparisons (NE, LT, LE, GT, GE, EQ) returning a
   predicate; each comes in vector-vector and vector-scalar (_n_) form.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_n_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpltq_m_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpltq_m_n_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpleq_m_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpleq_m_n_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgtq_m_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgtq_m_n_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgeq_m_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgeq_m_n_s8 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_s8 (__a, __b, __p);
}
23981
23982 __extension__ extern __inline mve_pred16_t
23983 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23984 __arm_vcmpeqq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
23985 {
23986 return __arm_vcmpeqq_m_n_s8 (__a, __b, __p);
23987 }
23988
/* Polymorphic predicated shift and unary-with-inactive overloads for
   int8x16_t; each dispatches to the _s8-suffixed intrinsic.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_m_r_s8 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_n_s8 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_m_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_r_s8 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_n_s8 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vqnegq_m_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vqabsq_m_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vnegq_m_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_m_s8 (__inactive, __a, __p);
}
24051
/* Polymorphic predicated reduction, duplicate and bit-count overloads for
   int8x16_t; each dispatches to the _s8 (or _n_s8) intrinsic.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmlsdavxq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmlsdavq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmladavxq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmladavq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p (int8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminvq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p (int8_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxvq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m (int8x16_t __inactive, int8_t __a, mve_pred16_t __p)
{
  return __arm_vdupq_m_n_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_m_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vclsq_m_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p (int32_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vaddvaq_p_s8 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_m_s8 (__inactive, __a, __p);
}
24128
/* Polymorphic saturating (rounding) doubling multiply-accumulate overloads
   for int8x16_t.  Three-vector forms dispatch to _s8; vector/scalar forms
   dispatch to _n_s8.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqrdmlsdhxq_s8 (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqrdmlsdhq_s8 (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __arm_vqrdmlashq_n_s8 (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __arm_vqdmlashq_n_s8 (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __arm_vqrdmlahq_n_s8 (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqrdmladhxq_s8 (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqrdmladhq_s8 (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqdmlsdhxq_s8 (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqdmlsdhq_s8 (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __arm_vqdmlahq_n_s8 (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqdmladhxq_s8 (__inactive, __a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
{
  return __arm_vqdmladhq_s8 (__inactive, __a, __b);
}
24212
/* Polymorphic multiply-accumulate, accumulating-reduction and shift-insert
   overloads for int8x16_t; each dispatches to the _s8 or _n_s8 intrinsic.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __arm_vmlsdavaxq_s8 (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __arm_vmlsdavaq_s8 (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __arm_vmlasq_n_s8 (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq (int8x16_t __a, int8x16_t __b, int8_t __c)
{
  return __arm_vmlaq_n_s8 (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __arm_vmladavaxq_s8 (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq (int32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __arm_vmladavaq_s8 (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq (int8x16_t __a, int8x16_t __b, const int __imm)
{
  return __arm_vsriq_n_s8 (__a, __b, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq (int8x16_t __a, int8x16_t __b, const int __imm)
{
  return __arm_vsliq_n_s8 (__a, __b, __imm);
}
24268
/* Polymorphic predicated-select overloads for 16-bit element vectors;
   dispatch to the _u16 / _s16 intrinsics.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_u16 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_s16 (__a, __b, __p);
}
24282
/* Polymorphic overloads for uint16x8_t: predicated unary ops,
   multiply-accumulate and reductions; each dispatches to the _u16
   (or _n_u16) intrinsic.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_m_u16 (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_m_u16 (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return __arm_vmlasq_n_u16 (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return __arm_vmlaq_n_u16 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmladavq_p_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __arm_vmladavaq_u16 (__a, __b, __c);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminvq_p_u16 (__a, __b, __p);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxvq_p_u16 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m (uint16x8_t __inactive, uint16_t __a, mve_pred16_t __p)
{
  return __arm_vdupq_m_n_u16 (__inactive, __a, __p);
}
24345
/* Polymorphic predicated compare overloads for uint16x8_t operands.
   Vector/vector forms dispatch to _u16, vector/scalar forms to _n_u16.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_u16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_n_u16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmphiq_m_u16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vcmphiq_m_n_u16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_u16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_n_u16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpcsq_m_u16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpcsq_m_n_u16 (__a, __b, __p);
}
24401
/* Polymorphic overloads for uint16x8_t: predicated count-leading-zeros,
   accumulating add-across, shift-insert and predicated shifts; each
   dispatches to the _u16 (or _n_u16) intrinsic.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_m_u16 (__inactive, __a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p (uint32_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vaddvaq_p_u16 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq (uint16x8_t __a, uint16x8_t __b, const int __imm)
{
  return __arm_vsriq_n_u16 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq (uint16x8_t __a, uint16x8_t __b, const int __imm)
{
  return __arm_vsliq_n_u16 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_m_r_u16 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_n_u16 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_r_u16 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_n_u16 (__a, __b, __p);
}
24457
/* Polymorphic overloads for the unsigned-accumulator absolute min/max
   operations on a signed int16x8_t vector; each dispatches to the
   corresponding _s16-suffixed intrinsic.  */
__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminavq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_m (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminaq_m_s16 (__a, __b, __p);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_p (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxavq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_m (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxaq_m_s16 (__a, __b, __p);
}
24485
/* Polymorphic predicated compare overloads for int16x8_t operands.
   Vector/vector forms dispatch to _s16, vector/scalar forms to _n_s16.  */
__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_n_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpltq_m_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpltq_m_n_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpleq_m_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpleq_m_n_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgtq_m_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgtq_m_n_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgeq_m_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgeq_m_n_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_s16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_n_s16 (__a, __b, __p);
}
24569
/* Polymorphic predicated shift and unary-with-inactive overloads for
   int16x8_t; each dispatches to the _s16-suffixed intrinsic.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_m_r_s16 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_n_s16 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_m_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_r_s16 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_n_s16 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vqnegq_m_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vqabsq_m_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vnegq_m_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_m_s16 (__inactive, __a, __p);
}
24632
/* Polymorphic predicated reduction, duplicate and bit-count overloads for
   int16x8_t; each dispatches to the _s16 (or _n_s16) intrinsic.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmlsdavxq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmlsdavq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmladavxq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmladavq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p (int16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminvq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p (int16_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxvq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m (int16x8_t __inactive, int16_t __a, mve_pred16_t __p)
{
  return __arm_vdupq_m_n_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_m_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vclsq_m_s16 (__inactive, __a, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p (int32_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vaddvaq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_m_s16 (__inactive, __a, __p);
}
24709
/* Polymorphic saturating (rounding) doubling multiply-accumulate overloads
   for int16x8_t.  Three-vector forms dispatch to _s16; vector/scalar forms
   dispatch to _n_s16.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqrdmlsdhxq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqrdmlsdhq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __arm_vqrdmlashq_n_s16 (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __arm_vqdmlashq_n_s16 (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __arm_vqrdmlahq_n_s16 (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqrdmladhxq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqrdmladhq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqdmlsdhxq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqdmlsdhq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __arm_vqdmlahq_n_s16 (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqdmladhxq_s16 (__inactive, __a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
{
  return __arm_vqdmladhq_s16 (__inactive, __a, __b);
}
24793
24794 __extension__ extern __inline int32_t
24795 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24796 __arm_vmlsdavaxq (int32_t __a, int16x8_t __b, int16x8_t __c)
24797 {
24798 return __arm_vmlsdavaxq_s16 (__a, __b, __c);
24799 }
24800
24801 __extension__ extern __inline int32_t
24802 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24803 __arm_vmlsdavaq (int32_t __a, int16x8_t __b, int16x8_t __c)
24804 {
24805 return __arm_vmlsdavaq_s16 (__a, __b, __c);
24806 }
24807
24808 __extension__ extern __inline int16x8_t
24809 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24810 __arm_vmlasq (int16x8_t __a, int16x8_t __b, int16_t __c)
24811 {
24812 return __arm_vmlasq_n_s16 (__a, __b, __c);
24813 }
24814
24815 __extension__ extern __inline int16x8_t
24816 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24817 __arm_vmlaq (int16x8_t __a, int16x8_t __b, int16_t __c)
24818 {
24819 return __arm_vmlaq_n_s16 (__a, __b, __c);
24820 }
24821
24822 __extension__ extern __inline int32_t
24823 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24824 __arm_vmladavaxq (int32_t __a, int16x8_t __b, int16x8_t __c)
24825 {
24826 return __arm_vmladavaxq_s16 (__a, __b, __c);
24827 }
24828
24829 __extension__ extern __inline int32_t
24830 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24831 __arm_vmladavaq (int32_t __a, int16x8_t __b, int16x8_t __c)
24832 {
24833 return __arm_vmladavaq_s16 (__a, __b, __c);
24834 }
24835
24836 __extension__ extern __inline int16x8_t
24837 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24838 __arm_vsriq (int16x8_t __a, int16x8_t __b, const int __imm)
24839 {
24840 return __arm_vsriq_n_s16 (__a, __b, __imm);
24841 }
24842
24843 __extension__ extern __inline int16x8_t
24844 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24845 __arm_vsliq (int16x8_t __a, int16x8_t __b, const int __imm)
24846 {
24847 return __arm_vsliq_n_s16 (__a, __b, __imm);
24848 }
24849
/* Predicated select between __a and __b, per-lane under predicate __p:
   32-bit element overloads forwarding to the type-suffixed intrinsics.  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_u32 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_s32 (__a, __b, __p);
}
24863
/* uint32x4_t predicated overloads.  __p is the per-lane predicate;
   __inactive supplies the result lanes where the predicate is false.
   Each wrapper forwards to the _u32-suffixed intrinsic; the _n_ forms
   take a scalar second operand.  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_m_u32 (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_m_u32 (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return __arm_vmlasq_n_u32 (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return __arm_vmlaq_n_u32 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmladavq_p_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __arm_vmladavaq_u32 (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminvq_p_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxvq_p_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m (uint32x4_t __inactive, uint32_t __a, mve_pred16_t __p)
{
  return __arm_vdupq_m_n_u32 (__inactive, __a, __p);
}

/* Predicated comparisons: vector/vector and vector/scalar (_n_) pairs,
   each returning a lane predicate.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_u32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_n_u32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmphiq_m_u32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmphiq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vcmphiq_m_n_u32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_u32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_n_u32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmpcsq_m_u32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpcsq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vcmpcsq_m_n_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_m_u32 (__inactive, __a, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vaddvaq_p_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq (uint32x4_t __a, uint32x4_t __b, const int __imm)
{
  return __arm_vsriq_n_u32 (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq (uint32x4_t __a, uint32x4_t __b, const int __imm)
{
  return __arm_vsliq_n_u32 (__a, __b, __imm);
}

/* Predicated shifts by a signed scalar count __b.  */

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_m_r_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_n_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_r_u32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_n_u32 (__a, __b, __p);
}
25038
/* int32x4_t predicated min/max-across-vector (absolute forms) and
   predicated comparisons, forwarding to the _s32-suffixed intrinsics.  */

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminavq_p (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminavq_p_s32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminaq_m (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminaq_m_s32 (__a, __b, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxavq_p (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxavq_p_s32 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxaq_m (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxaq_m_s32 (__a, __b, __p);
}

/* Predicated signed comparisons: vector/vector and vector/scalar (_n_)
   pairs, each returning a lane predicate.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_s32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vcmpneq_m_n_s32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmpltq_m_s32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vcmpltq_m_n_s32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmpleq_m_s32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vcmpleq_m_n_s32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgtq_m_s32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgtq_m_n_s32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgeq_m_s32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vcmpgeq_m_n_s32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_s32 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_n_s32 (__a, __b, __p);
}
25150
/* int32x4_t predicated shifts, unary operations, and scalar-accumulating
   reductions.  __inactive supplies result lanes where __p is false; the
   wrappers forward unchanged to the _s32-suffixed intrinsics.  */

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_r (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_m_r_s32 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m_n (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_n_s32 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_m_s32 (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_r (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_r_s32 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m_n (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_n_s32 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqnegq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vqnegq_m_s32 (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqabsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vqabsq_m_s32 (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vnegq_m_s32 (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_m_s32 (__inactive, __a, __p);
}

/* Predicated multiply-accumulate/subtract across-vector reductions.  */

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmlsdavxq_p_s32 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmlsdavq_p_s32 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmladavxq_p_s32 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmladavq_p_s32 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminvq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminvq_p_s32 (__a, __b, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxvq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxvq_p_s32 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m (int32x4_t __inactive, int32_t __a, mve_pred16_t __p)
{
  return __arm_vdupq_m_n_s32 (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_m_s32 (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vclsq_m_s32 (__inactive, __a, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddvaq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vaddvaq_p_s32 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_m_s32 (__inactive, __a, __p);
}
25290
/* int32x4_t saturating (rounding) multiply-accumulate/subtract,
   dot-product accumulate, and shift-insert overloads, forwarding to the
   _s32-suffixed intrinsics.  _n_ forms take a scalar third operand.  */

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __arm_vqrdmlsdhxq_s32 (__inactive, __a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlsdhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __arm_vqrdmlsdhq_s32 (__inactive, __a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlashq (int32x4_t __a, int32x4_t __b, int32_t __c)
{
  return __arm_vqrdmlashq_n_s32 (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlashq (int32x4_t __a, int32x4_t __b, int32_t __c)
{
  return __arm_vqdmlashq_n_s32 (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmlahq (int32x4_t __a, int32x4_t __b, int32_t __c)
{
  return __arm_vqrdmlahq_n_s32 (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __arm_vqrdmladhxq_s32 (__inactive, __a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrdmladhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __arm_vqrdmladhq_s32 (__inactive, __a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __arm_vqdmlsdhxq_s32 (__inactive, __a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlsdhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __arm_vqdmlsdhq_s32 (__inactive, __a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmlahq (int32x4_t __a, int32x4_t __b, int32_t __c)
{
  return __arm_vqdmlahq_n_s32 (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __arm_vqdmladhxq_s32 (__inactive, __a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmladhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
{
  return __arm_vqdmladhq_s32 (__inactive, __a, __b);
}

/* Scalar-accumulator forms: __a is the running 32-bit accumulator.  */

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaxq (int32_t __a, int32x4_t __b, int32x4_t __c)
{
  return __arm_vmlsdavaxq_s32 (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq (int32_t __a, int32x4_t __b, int32x4_t __c)
{
  return __arm_vmlsdavaq_s32 (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq (int32x4_t __a, int32x4_t __b, int32_t __c)
{
  return __arm_vmlasq_n_s32 (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq (int32x4_t __a, int32x4_t __b, int32_t __c)
{
  return __arm_vmlaq_n_s32 (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq (int32_t __a, int32x4_t __b, int32x4_t __c)
{
  return __arm_vmladavaxq_s32 (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq (int32_t __a, int32x4_t __b, int32x4_t __c)
{
  return __arm_vmladavaq_s32 (__a, __b, __c);
}

/* Shift-right/left-and-insert; __imm is a compile-time shift count.  */

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsriq (int32x4_t __a, int32x4_t __b, const int __imm)
{
  return __arm_vsriq_n_s32 (__a, __b, __imm);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq (int32x4_t __a, int32x4_t __b, const int __imm)
{
  return __arm_vsliq_n_s32 (__a, __b, __imm);
}
25430
/* Predicated select between __a and __b, per-lane under predicate __p:
   64-bit element overloads forwarding to the type-suffixed intrinsics.  */

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (uint64x2_t __a, uint64x2_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_u64 (__a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (int64x2_t __a, int64x2_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_s64 (__a, __b, __p);
}
25444
/* 64-bit-accumulator reductions over 32-bit vectors (accumulating and
   predicated forms) plus predicated vrev16q_m for 8-bit vectors.  Each
   wrapper forwards unchanged to the type-suffixed intrinsic.  */

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaxq (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __arm_vrmlaldavhaxq_s32 (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhaq (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __arm_vrmlsldavhaq_s32 (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhaxq (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __arm_vrmlsldavhaxq_s32 (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvaq_p (int64_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vaddlvaq_p_s32 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev16q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vrev16q_m_s8 (__inactive, __a, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrmlaldavhq_p_s32 (__a, __b, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrmlaldavhxq_p_s32 (__a, __b, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrmlsldavhq_p_s32 (__a, __b, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlsldavhxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrmlsldavhxq_p_s32 (__a, __b, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddlvaq_p (uint64_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vaddlvaq_p_u32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev16q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vrev16q_m_u8 (__inactive, __a, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrmlaldavhq_p_u32 (__a, __b, __p);
}
25528
/* int16x8_t immediate-operand forms, 16->8-bit narrowing shifts, and
   64-bit-accumulator long multiply reductions.  __imm is a compile-time
   immediate; each wrapper forwards to the _s16-suffixed intrinsic.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_m (int16x8_t __inactive, const int __imm, mve_pred16_t __p)
{
  return __arm_vmvnq_m_n_s16 (__inactive, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_m_n (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vorrq_m_n_s16 (__a, __imm, __p);
}

/* Narrowing shifts: __b is shifted right by __imm and the narrowed
   halves are written into __a.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq (int8x16_t __a, int16x8_t __b, const int __imm)
{
  return __arm_vqrshrntq_n_s16 (__a, __b, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrnbq (int8x16_t __a, int16x8_t __b, const int __imm)
{
  return __arm_vqshrnbq_n_s16 (__a, __b, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshrntq (int8x16_t __a, int16x8_t __b, const int __imm)
{
  return __arm_vqshrntq_n_s16 (__a, __b, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrnbq (int8x16_t __a, int16x8_t __b, const int __imm)
{
  return __arm_vrshrnbq_n_s16 (__a, __b, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrntq (int8x16_t __a, int16x8_t __b, const int __imm)
{
  return __arm_vrshrntq_n_s16 (__a, __b, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrnbq (int8x16_t __a, int16x8_t __b, const int __imm)
{
  return __arm_vshrnbq_n_s16 (__a, __b, __imm);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrntq (int8x16_t __a, int16x8_t __b, const int __imm)
{
  return __arm_vshrntq_n_s16 (__a, __b, __imm);
}

/* Long multiply reductions: __a is the running 64-bit accumulator.  */

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq (int64_t __a, int16x8_t __b, int16x8_t __c)
{
  return __arm_vmlaldavaq_s16 (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaxq (int64_t __a, int16x8_t __b, int16x8_t __c)
{
  return __arm_vmlaldavaxq_s16 (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaq (int64_t __a, int16x8_t __b, int16x8_t __c)
{
  return __arm_vmlsldavaq_s16 (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaxq (int64_t __a, int16x8_t __b, int16x8_t __c)
{
  return __arm_vmlsldavaxq_s16 (__a, __b, __c);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmlaldavq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmlaldavxq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmlsldavq_p_s16 (__a, __b, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmlsldavxq_p_s16 (__a, __b, __p);
}
25647
25648 __extension__ extern __inline int16x8_t
25649 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25650 __arm_vmovlbq_m (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p)
25651 {
25652 return __arm_vmovlbq_m_s8 (__inactive, __a, __p);
25653 }
25654
25655 __extension__ extern __inline int16x8_t
25656 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25657 __arm_vmovltq_m (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p)
25658 {
25659 return __arm_vmovltq_m_s8 (__inactive, __a, __p);
25660 }
25661
25662 __extension__ extern __inline int8x16_t
25663 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25664 __arm_vmovnbq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
25665 {
25666 return __arm_vmovnbq_m_s16 (__a, __b, __p);
25667 }
25668
25669 __extension__ extern __inline int8x16_t
25670 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25671 __arm_vmovntq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
25672 {
25673 return __arm_vmovntq_m_s16 (__a, __b, __p);
25674 }
25675
25676 __extension__ extern __inline int8x16_t
25677 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25678 __arm_vqmovnbq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
25679 {
25680 return __arm_vqmovnbq_m_s16 (__a, __b, __p);
25681 }
25682
25683 __extension__ extern __inline int8x16_t
25684 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25685 __arm_vqmovntq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
25686 {
25687 return __arm_vqmovntq_m_s16 (__a, __b, __p);
25688 }
25689
25690 __extension__ extern __inline int8x16_t
25691 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25692 __arm_vrev32q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
25693 {
25694 return __arm_vrev32q_m_s8 (__inactive, __a, __p);
25695 }
25696
25697 __extension__ extern __inline uint16x8_t
25698 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25699 __arm_vmvnq_m (uint16x8_t __inactive, const int __imm, mve_pred16_t __p)
25700 {
25701 return __arm_vmvnq_m_n_u16 (__inactive, __imm, __p);
25702 }
25703
25704 __extension__ extern __inline uint16x8_t
25705 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25706 __arm_vorrq_m_n (uint16x8_t __a, const int __imm, mve_pred16_t __p)
25707 {
25708 return __arm_vorrq_m_n_u16 (__a, __imm, __p);
25709 }
25710
25711 __extension__ extern __inline uint8x16_t
25712 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25713 __arm_vqrshruntq (uint8x16_t __a, int16x8_t __b, const int __imm)
25714 {
25715 return __arm_vqrshruntq_n_s16 (__a, __b, __imm);
25716 }
25717
25718 __extension__ extern __inline uint8x16_t
25719 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25720 __arm_vqshrunbq (uint8x16_t __a, int16x8_t __b, const int __imm)
25721 {
25722 return __arm_vqshrunbq_n_s16 (__a, __b, __imm);
25723 }
25724
25725 __extension__ extern __inline uint8x16_t
25726 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25727 __arm_vqshruntq (uint8x16_t __a, int16x8_t __b, const int __imm)
25728 {
25729 return __arm_vqshruntq_n_s16 (__a, __b, __imm);
25730 }
25731
25732 __extension__ extern __inline uint8x16_t
25733 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25734 __arm_vqmovunbq_m (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p)
25735 {
25736 return __arm_vqmovunbq_m_s16 (__a, __b, __p);
25737 }
25738
25739 __extension__ extern __inline uint8x16_t
25740 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25741 __arm_vqmovuntq_m (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p)
25742 {
25743 return __arm_vqmovuntq_m_s16 (__a, __b, __p);
25744 }
25745
25746 __extension__ extern __inline uint8x16_t
25747 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25748 __arm_vqrshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm)
25749 {
25750 return __arm_vqrshrntq_n_u16 (__a, __b, __imm);
25751 }
25752
25753 __extension__ extern __inline uint8x16_t
25754 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25755 __arm_vqshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm)
25756 {
25757 return __arm_vqshrnbq_n_u16 (__a, __b, __imm);
25758 }
25759
25760 __extension__ extern __inline uint8x16_t
25761 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25762 __arm_vqshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm)
25763 {
25764 return __arm_vqshrntq_n_u16 (__a, __b, __imm);
25765 }
25766
25767 __extension__ extern __inline uint8x16_t
25768 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25769 __arm_vrshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm)
25770 {
25771 return __arm_vrshrnbq_n_u16 (__a, __b, __imm);
25772 }
25773
25774 __extension__ extern __inline uint8x16_t
25775 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25776 __arm_vrshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm)
25777 {
25778 return __arm_vrshrntq_n_u16 (__a, __b, __imm);
25779 }
25780
25781 __extension__ extern __inline uint8x16_t
25782 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25783 __arm_vshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm)
25784 {
25785 return __arm_vshrnbq_n_u16 (__a, __b, __imm);
25786 }
25787
25788 __extension__ extern __inline uint8x16_t
25789 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25790 __arm_vshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm)
25791 {
25792 return __arm_vshrntq_n_u16 (__a, __b, __imm);
25793 }
25794
/* Unsigned 16-bit-lane overloads: vmlaldav with uint64_t result, and
   predicated (_m) widen/narrow moves and rev32 on u8/u16 operands.
   Each wrapper only forwards to the _u8/_u16-suffixed builtin wrapper
   matching its argument types.  */
25795 __extension__ extern __inline uint64_t
25796 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25797 __arm_vmlaldavaq (uint64_t __a, uint16x8_t __b, uint16x8_t __c)
25798 {
25799 return __arm_vmlaldavaq_u16 (__a, __b, __c);
25800 }
25801 
25802 __extension__ extern __inline uint64_t
25803 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25804 __arm_vmlaldavq_p (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
25805 {
25806 return __arm_vmlaldavq_p_u16 (__a, __b, __p);
25807 }
25808 
25809 __extension__ extern __inline uint16x8_t
25810 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25811 __arm_vmovlbq_m (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p)
25812 {
25813 return __arm_vmovlbq_m_u8 (__inactive, __a, __p);
25814 }
25815 
25816 __extension__ extern __inline uint16x8_t
25817 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25818 __arm_vmovltq_m (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p)
25819 {
25820 return __arm_vmovltq_m_u8 (__inactive, __a, __p);
25821 }
25822 
25823 __extension__ extern __inline uint8x16_t
25824 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25825 __arm_vmovnbq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
25826 {
25827 return __arm_vmovnbq_m_u16 (__a, __b, __p);
25828 }
25829 
25830 __extension__ extern __inline uint8x16_t
25831 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25832 __arm_vmovntq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
25833 {
25834 return __arm_vmovntq_m_u16 (__a, __b, __p);
25835 }
25836 
25837 __extension__ extern __inline uint8x16_t
25838 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25839 __arm_vqmovnbq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
25840 {
25841 return __arm_vqmovnbq_m_u16 (__a, __b, __p);
25842 }
25843 
25844 __extension__ extern __inline uint8x16_t
25845 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25846 __arm_vqmovntq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
25847 {
25848 return __arm_vqmovntq_m_u16 (__a, __b, __p);
25849 }
25850 
25851 __extension__ extern __inline uint8x16_t
25852 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25853 __arm_vrev32q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
25854 {
25855 return __arm_vrev32q_m_u8 (__inactive, __a, __p);
25856 }
25857
/* Signed 32-bit-lane overloads.  Same dispatch-only pattern: predicated
   vmvnq_m/vorrq_m_n with an immediate, 32->16 bit narrowing shifts,
   the vmlaldav*/vmlsldav* family (int64_t scalar result), and
   predicated widen/narrow moves and rev32 on s16/s32 operands.  */
25858 __extension__ extern __inline int32x4_t
25859 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25860 __arm_vmvnq_m (int32x4_t __inactive, const int __imm, mve_pred16_t __p)
25861 {
25862 return __arm_vmvnq_m_n_s32 (__inactive, __imm, __p);
25863 }
25864 
25865 __extension__ extern __inline int32x4_t
25866 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25867 __arm_vorrq_m_n (int32x4_t __a, const int __imm, mve_pred16_t __p)
25868 {
25869 return __arm_vorrq_m_n_s32 (__a, __imm, __p);
25870 }
25871 
25872 __extension__ extern __inline int16x8_t
25873 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25874 __arm_vqrshrntq (int16x8_t __a, int32x4_t __b, const int __imm)
25875 {
25876 return __arm_vqrshrntq_n_s32 (__a, __b, __imm);
25877 }
25878 
25879 __extension__ extern __inline int16x8_t
25880 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25881 __arm_vqshrnbq (int16x8_t __a, int32x4_t __b, const int __imm)
25882 {
25883 return __arm_vqshrnbq_n_s32 (__a, __b, __imm);
25884 }
25885 
25886 __extension__ extern __inline int16x8_t
25887 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25888 __arm_vqshrntq (int16x8_t __a, int32x4_t __b, const int __imm)
25889 {
25890 return __arm_vqshrntq_n_s32 (__a, __b, __imm);
25891 }
25892 
25893 __extension__ extern __inline int16x8_t
25894 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25895 __arm_vrshrnbq (int16x8_t __a, int32x4_t __b, const int __imm)
25896 {
25897 return __arm_vrshrnbq_n_s32 (__a, __b, __imm);
25898 }
25899 
25900 __extension__ extern __inline int16x8_t
25901 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25902 __arm_vrshrntq (int16x8_t __a, int32x4_t __b, const int __imm)
25903 {
25904 return __arm_vrshrntq_n_s32 (__a, __b, __imm);
25905 }
25906 
25907 __extension__ extern __inline int16x8_t
25908 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25909 __arm_vshrnbq (int16x8_t __a, int32x4_t __b, const int __imm)
25910 {
25911 return __arm_vshrnbq_n_s32 (__a, __b, __imm);
25912 }
25913 
25914 __extension__ extern __inline int16x8_t
25915 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25916 __arm_vshrntq (int16x8_t __a, int32x4_t __b, const int __imm)
25917 {
25918 return __arm_vshrntq_n_s32 (__a, __b, __imm);
25919 }
25920 
/* vmlaldav*/vmlsldav* overloads: s32 vector operands, int64_t scalar
   accumulator/result; the _p forms additionally take a predicate.  */
25921 __extension__ extern __inline int64_t
25922 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25923 __arm_vmlaldavaq (int64_t __a, int32x4_t __b, int32x4_t __c)
25924 {
25925 return __arm_vmlaldavaq_s32 (__a, __b, __c);
25926 }
25927 
25928 __extension__ extern __inline int64_t
25929 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25930 __arm_vmlaldavaxq (int64_t __a, int32x4_t __b, int32x4_t __c)
25931 {
25932 return __arm_vmlaldavaxq_s32 (__a, __b, __c);
25933 }
25934 
25935 __extension__ extern __inline int64_t
25936 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25937 __arm_vmlsldavaq (int64_t __a, int32x4_t __b, int32x4_t __c)
25938 {
25939 return __arm_vmlsldavaq_s32 (__a, __b, __c);
25940 }
25941 
25942 __extension__ extern __inline int64_t
25943 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25944 __arm_vmlsldavaxq (int64_t __a, int32x4_t __b, int32x4_t __c)
25945 {
25946 return __arm_vmlsldavaxq_s32 (__a, __b, __c);
25947 }
25948 
25949 __extension__ extern __inline int64_t
25950 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25951 __arm_vmlaldavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25952 {
25953 return __arm_vmlaldavq_p_s32 (__a, __b, __p);
25954 }
25955 
25956 __extension__ extern __inline int64_t
25957 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25958 __arm_vmlaldavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25959 {
25960 return __arm_vmlaldavxq_p_s32 (__a, __b, __p);
25961 }
25962 
25963 __extension__ extern __inline int64_t
25964 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25965 __arm_vmlsldavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25966 {
25967 return __arm_vmlsldavq_p_s32 (__a, __b, __p);
25968 }
25969 
25970 __extension__ extern __inline int64_t
25971 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25972 __arm_vmlsldavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
25973 {
25974 return __arm_vmlsldavxq_p_s32 (__a, __b, __p);
25975 }
25976 
/* Predicated widen/narrow moves and rev32 for s16/s32 operands.  */
25977 __extension__ extern __inline int32x4_t
25978 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25979 __arm_vmovlbq_m (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p)
25980 {
25981 return __arm_vmovlbq_m_s16 (__inactive, __a, __p);
25982 }
25983 
25984 __extension__ extern __inline int32x4_t
25985 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25986 __arm_vmovltq_m (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p)
25987 {
25988 return __arm_vmovltq_m_s16 (__inactive, __a, __p);
25989 }
25990 
25991 __extension__ extern __inline int16x8_t
25992 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25993 __arm_vmovnbq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
25994 {
25995 return __arm_vmovnbq_m_s32 (__a, __b, __p);
25996 }
25997 
25998 __extension__ extern __inline int16x8_t
25999 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26000 __arm_vmovntq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
26001 {
26002 return __arm_vmovntq_m_s32 (__a, __b, __p);
26003 }
26004 
26005 __extension__ extern __inline int16x8_t
26006 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26007 __arm_vqmovnbq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
26008 {
26009 return __arm_vqmovnbq_m_s32 (__a, __b, __p);
26010 }
26011 
26012 __extension__ extern __inline int16x8_t
26013 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26014 __arm_vqmovntq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
26015 {
26016 return __arm_vqmovntq_m_s32 (__a, __b, __p);
26017 }
26018 
26019 __extension__ extern __inline int16x8_t
26020 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26021 __arm_vrev32q_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
26022 {
26023 return __arm_vrev32q_m_s16 (__inactive, __a, __p);
26024 }
26025
/* Unsigned 32-bit-lane overloads, mirroring the signed group above:
   predicated vmvnq_m/vorrq_m_n with an immediate, unsigned-result
   saturating narrows from a signed int32x4_t source (dispatching to
   _n_s32/_m_s32), 32->16 bit narrows on uint32x4_t sources, vmlaldav
   with uint64_t result, and predicated moves/rev32 on u16/u32.  */
26026 __extension__ extern __inline uint32x4_t
26027 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26028 __arm_vmvnq_m (uint32x4_t __inactive, const int __imm, mve_pred16_t __p)
26029 {
26030 return __arm_vmvnq_m_n_u32 (__inactive, __imm, __p);
26031 }
26032 
26033 __extension__ extern __inline uint32x4_t
26034 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26035 __arm_vorrq_m_n (uint32x4_t __a, const int __imm, mve_pred16_t __p)
26036 {
26037 return __arm_vorrq_m_n_u32 (__a, __imm, __p);
26038 }
26039 
26040 __extension__ extern __inline uint16x8_t
26041 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26042 __arm_vqrshruntq (uint16x8_t __a, int32x4_t __b, const int __imm)
26043 {
26044 return __arm_vqrshruntq_n_s32 (__a, __b, __imm);
26045 }
26046 
26047 __extension__ extern __inline uint16x8_t
26048 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26049 __arm_vqshrunbq (uint16x8_t __a, int32x4_t __b, const int __imm)
26050 {
26051 return __arm_vqshrunbq_n_s32 (__a, __b, __imm);
26052 }
26053 
26054 __extension__ extern __inline uint16x8_t
26055 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26056 __arm_vqshruntq (uint16x8_t __a, int32x4_t __b, const int __imm)
26057 {
26058 return __arm_vqshruntq_n_s32 (__a, __b, __imm);
26059 }
26060 
26061 __extension__ extern __inline uint16x8_t
26062 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26063 __arm_vqmovunbq_m (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p)
26064 {
26065 return __arm_vqmovunbq_m_s32 (__a, __b, __p);
26066 }
26067 
26068 __extension__ extern __inline uint16x8_t
26069 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26070 __arm_vqmovuntq_m (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p)
26071 {
26072 return __arm_vqmovuntq_m_s32 (__a, __b, __p);
26073 }
26074 
26075 __extension__ extern __inline uint16x8_t
26076 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26077 __arm_vqrshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm)
26078 {
26079 return __arm_vqrshrntq_n_u32 (__a, __b, __imm);
26080 }
26081 
26082 __extension__ extern __inline uint16x8_t
26083 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26084 __arm_vqshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm)
26085 {
26086 return __arm_vqshrnbq_n_u32 (__a, __b, __imm);
26087 }
26088 
26089 __extension__ extern __inline uint16x8_t
26090 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26091 __arm_vqshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm)
26092 {
26093 return __arm_vqshrntq_n_u32 (__a, __b, __imm);
26094 }
26095 
26096 __extension__ extern __inline uint16x8_t
26097 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26098 __arm_vrshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm)
26099 {
26100 return __arm_vrshrnbq_n_u32 (__a, __b, __imm);
26101 }
26102 
26103 __extension__ extern __inline uint16x8_t
26104 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26105 __arm_vrshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm)
26106 {
26107 return __arm_vrshrntq_n_u32 (__a, __b, __imm);
26108 }
26109 
26110 __extension__ extern __inline uint16x8_t
26111 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26112 __arm_vshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm)
26113 {
26114 return __arm_vshrnbq_n_u32 (__a, __b, __imm);
26115 }
26116 
26117 __extension__ extern __inline uint16x8_t
26118 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26119 __arm_vshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm)
26120 {
26121 return __arm_vshrntq_n_u32 (__a, __b, __imm);
26122 }
26123 
26124 __extension__ extern __inline uint64_t
26125 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26126 __arm_vmlaldavaq (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
26127 {
26128 return __arm_vmlaldavaq_u32 (__a, __b, __c);
26129 }
26130 
26131 __extension__ extern __inline uint64_t
26132 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26133 __arm_vmlaldavq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26134 {
26135 return __arm_vmlaldavq_p_u32 (__a, __b, __p);
26136 }
26137 
26138 __extension__ extern __inline uint32x4_t
26139 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26140 __arm_vmovlbq_m (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p)
26141 {
26142 return __arm_vmovlbq_m_u16 (__inactive, __a, __p);
26143 }
26144 
26145 __extension__ extern __inline uint32x4_t
26146 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26147 __arm_vmovltq_m (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p)
26148 {
26149 return __arm_vmovltq_m_u16 (__inactive, __a, __p);
26150 }
26151 
26152 __extension__ extern __inline uint16x8_t
26153 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26154 __arm_vmovnbq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
26155 {
26156 return __arm_vmovnbq_m_u32 (__a, __b, __p);
26157 }
26158 
26159 __extension__ extern __inline uint16x8_t
26160 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26161 __arm_vmovntq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
26162 {
26163 return __arm_vmovntq_m_u32 (__a, __b, __p);
26164 }
26165 
26166 __extension__ extern __inline uint16x8_t
26167 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26168 __arm_vqmovnbq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
26169 {
26170 return __arm_vqmovnbq_m_u32 (__a, __b, __p);
26171 }
26172 
26173 __extension__ extern __inline uint16x8_t
26174 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26175 __arm_vqmovntq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
26176 {
26177 return __arm_vqmovntq_m_u32 (__a, __b, __p);
26178 }
26179 
26180 __extension__ extern __inline uint16x8_t
26181 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26182 __arm_vrev32q_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
26183 {
26184 return __arm_vrev32q_m_u16 (__inactive, __a, __p);
26185 }
26186
/* Predicated vsriq_m / vsubq_m / vshlq_m / vqshluq_m / vabavq_p
   overloads for 8-, 16- and 32-bit lanes, signed and unsigned.  Each
   wrapper forwards unchanged to the type-suffixed variant.  vabavq_p
   always returns a uint32_t scalar; vqshluq_m takes a signed source
   and an unsigned __inactive/result; vshlq_m takes a signed shift
   vector regardless of the data signedness.  */
26187 __extension__ extern __inline int8x16_t
26188 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26189 __arm_vsriq_m (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
26190 {
26191 return __arm_vsriq_m_n_s8 (__a, __b, __imm, __p);
26192 }
26193 
26194 __extension__ extern __inline int8x16_t
26195 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26196 __arm_vsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26197 {
26198 return __arm_vsubq_m_s8 (__inactive, __a, __b, __p);
26199 }
26200 
26201 __extension__ extern __inline uint8x16_t
26202 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26203 __arm_vqshluq_m (uint8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
26204 {
26205 return __arm_vqshluq_m_n_s8 (__inactive, __a, __imm, __p);
26206 }
26207 
26208 __extension__ extern __inline uint32_t
26209 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26210 __arm_vabavq_p (uint32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
26211 {
26212 return __arm_vabavq_p_s8 (__a, __b, __c, __p);
26213 }
26214 
26215 __extension__ extern __inline uint8x16_t
26216 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26217 __arm_vsriq_m (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
26218 {
26219 return __arm_vsriq_m_n_u8 (__a, __b, __imm, __p);
26220 }
26221 
26222 __extension__ extern __inline uint8x16_t
26223 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26224 __arm_vshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26225 {
26226 return __arm_vshlq_m_u8 (__inactive, __a, __b, __p);
26227 }
26228 
26229 __extension__ extern __inline uint8x16_t
26230 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26231 __arm_vsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
26232 {
26233 return __arm_vsubq_m_u8 (__inactive, __a, __b, __p);
26234 }
26235 
26236 __extension__ extern __inline uint32_t
26237 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26238 __arm_vabavq_p (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
26239 {
26240 return __arm_vabavq_p_u8 (__a, __b, __c, __p);
26241 }
26242 
26243 __extension__ extern __inline int8x16_t
26244 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26245 __arm_vshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26246 {
26247 return __arm_vshlq_m_s8 (__inactive, __a, __b, __p);
26248 }
26249 
/* 16-bit lane versions of the same families.  */
26250 __extension__ extern __inline int16x8_t
26251 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26252 __arm_vsriq_m (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
26253 {
26254 return __arm_vsriq_m_n_s16 (__a, __b, __imm, __p);
26255 }
26256 
26257 __extension__ extern __inline int16x8_t
26258 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26259 __arm_vsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26260 {
26261 return __arm_vsubq_m_s16 (__inactive, __a, __b, __p);
26262 }
26263 
26264 __extension__ extern __inline uint16x8_t
26265 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26266 __arm_vqshluq_m (uint16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
26267 {
26268 return __arm_vqshluq_m_n_s16 (__inactive, __a, __imm, __p);
26269 }
26270 
26271 __extension__ extern __inline uint32_t
26272 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26273 __arm_vabavq_p (uint32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
26274 {
26275 return __arm_vabavq_p_s16 (__a, __b, __c, __p);
26276 }
26277 
26278 __extension__ extern __inline uint16x8_t
26279 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26280 __arm_vsriq_m (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
26281 {
26282 return __arm_vsriq_m_n_u16 (__a, __b, __imm, __p);
26283 }
26284 
26285 __extension__ extern __inline uint16x8_t
26286 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26287 __arm_vshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26288 {
26289 return __arm_vshlq_m_u16 (__inactive, __a, __b, __p);
26290 }
26291 
26292 __extension__ extern __inline uint16x8_t
26293 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26294 __arm_vsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
26295 {
26296 return __arm_vsubq_m_u16 (__inactive, __a, __b, __p);
26297 }
26298 
26299 __extension__ extern __inline uint32_t
26300 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26301 __arm_vabavq_p (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
26302 {
26303 return __arm_vabavq_p_u16 (__a, __b, __c, __p);
26304 }
26305 
26306 __extension__ extern __inline int16x8_t
26307 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26308 __arm_vshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26309 {
26310 return __arm_vshlq_m_s16 (__inactive, __a, __b, __p);
26311 }
26312 
/* 32-bit lane versions of the same families.  */
26313 __extension__ extern __inline int32x4_t
26314 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26315 __arm_vsriq_m (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
26316 {
26317 return __arm_vsriq_m_n_s32 (__a, __b, __imm, __p);
26318 }
26319 
26320 __extension__ extern __inline int32x4_t
26321 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26322 __arm_vsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26323 {
26324 return __arm_vsubq_m_s32 (__inactive, __a, __b, __p);
26325 }
26326 
26327 __extension__ extern __inline uint32x4_t
26328 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26329 __arm_vqshluq_m (uint32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
26330 {
26331 return __arm_vqshluq_m_n_s32 (__inactive, __a, __imm, __p);
26332 }
26333 
26334 __extension__ extern __inline uint32_t
26335 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26336 __arm_vabavq_p (uint32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
26337 {
26338 return __arm_vabavq_p_s32 (__a, __b, __c, __p);
26339 }
26340 
26341 __extension__ extern __inline uint32x4_t
26342 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26343 __arm_vsriq_m (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
26344 {
26345 return __arm_vsriq_m_n_u32 (__a, __b, __imm, __p);
26346 }
26347 
26348 __extension__ extern __inline uint32x4_t
26349 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26350 __arm_vshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26351 {
26352 return __arm_vshlq_m_u32 (__inactive, __a, __b, __p);
26353 }
26354 
26355 __extension__ extern __inline uint32x4_t
26356 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26357 __arm_vsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26358 {
26359 return __arm_vsubq_m_u32 (__inactive, __a, __b, __p);
26360 }
26361 
26362 __extension__ extern __inline uint32_t
26363 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26364 __arm_vabavq_p (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
26365 {
26366 return __arm_vabavq_p_u32 (__a, __b, __c, __p);
26367 }
26368 
26369 __extension__ extern __inline int32x4_t
26370 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26371 __arm_vshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26372 {
26373 return __arm_vshlq_m_s32 (__inactive, __a, __b, __p);
26374 }
26375
/* __arm_vabdq_m: six predicated overloads (s8/s16/s32, u8/u16/u32),
   each forwarding unchanged to the matching type-suffixed variant,
   with __inactive supplying the unselected lanes.  */
26376 __extension__ extern __inline int8x16_t
26377 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26378 __arm_vabdq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26379 {
26380 return __arm_vabdq_m_s8 (__inactive, __a, __b, __p);
26381 }
26382 
26383 __extension__ extern __inline int32x4_t
26384 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26385 __arm_vabdq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26386 {
26387 return __arm_vabdq_m_s32 (__inactive, __a, __b, __p);
26388 }
26389 
26390 __extension__ extern __inline int16x8_t
26391 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26392 __arm_vabdq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26393 {
26394 return __arm_vabdq_m_s16 (__inactive, __a, __b, __p);
26395 }
26396 
26397 __extension__ extern __inline uint8x16_t
26398 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26399 __arm_vabdq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
26400 {
26401 return __arm_vabdq_m_u8 (__inactive, __a, __b, __p);
26402 }
26403 
26404 __extension__ extern __inline uint32x4_t
26405 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26406 __arm_vabdq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26407 {
26408 return __arm_vabdq_m_u32 (__inactive, __a, __b, __p);
26409 }
26410 
26411 __extension__ extern __inline uint16x8_t
26412 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26413 __arm_vabdq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
26414 {
26415 return __arm_vabdq_m_u16 (__inactive, __a, __b, __p);
26416 }
26417
/* __arm_vaddq_m predicated overloads.  The first six take a scalar
   `int __b' addend and dispatch to the _n_-suffixed variants; the
   remaining ones take a vector addend and dispatch to the plain
   type-suffixed variants.  */
26418 __extension__ extern __inline int8x16_t
26419 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26420 __arm_vaddq_m (int8x16_t __inactive, int8x16_t __a, int __b, mve_pred16_t __p)
26421 {
26422 return __arm_vaddq_m_n_s8 (__inactive, __a, __b, __p);
26423 }
26424 
26425 __extension__ extern __inline int32x4_t
26426 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26427 __arm_vaddq_m (int32x4_t __inactive, int32x4_t __a, int __b, mve_pred16_t __p)
26428 {
26429 return __arm_vaddq_m_n_s32 (__inactive, __a, __b, __p);
26430 }
26431 
26432 __extension__ extern __inline int16x8_t
26433 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26434 __arm_vaddq_m (int16x8_t __inactive, int16x8_t __a, int __b, mve_pred16_t __p)
26435 {
26436 return __arm_vaddq_m_n_s16 (__inactive, __a, __b, __p);
26437 }
26438 
26439 __extension__ extern __inline uint8x16_t
26440 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26441 __arm_vaddq_m (uint8x16_t __inactive, uint8x16_t __a, int __b, mve_pred16_t __p)
26442 {
26443 return __arm_vaddq_m_n_u8 (__inactive, __a, __b, __p);
26444 }
26445 
26446 __extension__ extern __inline uint32x4_t
26447 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26448 __arm_vaddq_m (uint32x4_t __inactive, uint32x4_t __a, int __b, mve_pred16_t __p)
26449 {
26450 return __arm_vaddq_m_n_u32 (__inactive, __a, __b, __p);
26451 }
26452 
26453 __extension__ extern __inline uint16x8_t
26454 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26455 __arm_vaddq_m (uint16x8_t __inactive, uint16x8_t __a, int __b, mve_pred16_t __p)
26456 {
26457 return __arm_vaddq_m_n_u16 (__inactive, __a, __b, __p);
26458 }
26459 
/* Vector-addend forms.  */
26460 __extension__ extern __inline int8x16_t
26461 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26462 __arm_vaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
26463 {
26464 return __arm_vaddq_m_s8 (__inactive, __a, __b, __p);
26465 }
26466 
26467 __extension__ extern __inline int32x4_t
26468 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26469 __arm_vaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
26470 {
26471 return __arm_vaddq_m_s32 (__inactive, __a, __b, __p);
26472 }
26473 
26474 __extension__ extern __inline int16x8_t
26475 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26476 __arm_vaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
26477 {
26478 return __arm_vaddq_m_s16 (__inactive, __a, __b, __p);
26479 }
26480
26481 __extension__ extern __inline uint8x16_t
26482 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26483 __arm_vaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
26484 {
26485 return __arm_vaddq_m_u8 (__inactive, __a, __b, __p);
26486 }
26487
26488 __extension__ extern __inline uint32x4_t
26489 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26490 __arm_vaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
26491 {
26492 return __arm_vaddq_m_u32 (__inactive, __a, __b, __p);
26493 }
26494
26495 __extension__ extern __inline uint16x8_t
26496 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26497 __arm_vaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
26498 {
26499 return __arm_vaddq_m_u16 (__inactive, __a, __b, __p);
26500 }
26501
/* Overloaded __arm_vandq_m wrappers: dispatch on argument types to the
   type-suffixed predicated bitwise-AND intrinsics.  Per ACLE, lanes
   predicated off by __p take their value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vandq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vandq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vandq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vandq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vandq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vandq_m_u16 (__inactive, __a, __b, __p);
}
26543
/* Overloaded __arm_vbicq_m wrappers: dispatch on argument types to the
   type-suffixed predicated bit-clear (AND-NOT) intrinsics.  Per ACLE,
   lanes predicated off by __p take their value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_m_u16 (__inactive, __a, __b, __p);
}
26585
/* Overloaded __arm_vbrsrq_m wrappers: dispatch on the vector argument
   type to the type-suffixed predicated bit-reverse-shift-right (_n_)
   intrinsics; the scalar operand __b is int32_t for every element type.
   Per ACLE, lanes predicated off by __p take their value from
   __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m (int8x16_t __inactive, int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_m_n_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_m_n_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m (int16x8_t __inactive, int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_m_n_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m (uint8x16_t __inactive, uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_m_n_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m (uint32x4_t __inactive, uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_m_n_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_m (uint16x8_t __inactive, uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_m_n_u16 (__inactive, __a, __b, __p);
}
26627
/* Overloaded __arm_vcaddq_rot270_m wrappers: dispatch on argument types
   to the type-suffixed predicated complex-add-with-rotate-270
   intrinsics.  Per ACLE, lanes predicated off by __p take their value
   from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_m_u16 (__inactive, __a, __b, __p);
}
26669
/* Overloaded __arm_vcaddq_rot90_m wrappers: dispatch on argument types
   to the type-suffixed predicated complex-add-with-rotate-90
   intrinsics.  Per ACLE, lanes predicated off by __p take their value
   from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_m_u16 (__inactive, __a, __b, __p);
}
26711
/* Overloaded __arm_veorq_m wrappers: dispatch on argument types to the
   type-suffixed predicated bitwise-XOR intrinsics.  Per ACLE, lanes
   predicated off by __p take their value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_veorq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_veorq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_veorq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_veorq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_veorq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_veorq_m_u16 (__inactive, __a, __b, __p);
}
26753
/* Overloaded __arm_vhaddq_m wrappers: dispatch on argument types to the
   type-suffixed predicated halving-add intrinsics.  Forms taking an
   element-typed scalar second operand map to the _n_ (vector + scalar)
   variants; vector/vector forms map to the plain variants.  Per ACLE,
   lanes predicated off by __p take their value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_m_n_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_m_n_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_m_n_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_m_n_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_m_n_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_m_n_u16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_m_u16 (__inactive, __a, __b, __p);
}
26837
/* Overloaded __arm_vhcaddq_rot270_m wrappers: dispatch on argument
   types to the type-suffixed predicated halving-complex-add-rotate-270
   intrinsics (signed element types only).  Per ACLE, lanes predicated
   off by __p take their value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vhcaddq_rot270_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vhcaddq_rot270_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vhcaddq_rot270_m_s16 (__inactive, __a, __b, __p);
}
26858
/* Overloaded __arm_vhcaddq_rot90_m wrappers: dispatch on argument
   types to the type-suffixed predicated halving-complex-add-rotate-90
   intrinsics (signed element types only).  Per ACLE, lanes predicated
   off by __p take their value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vhcaddq_rot90_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vhcaddq_rot90_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vhcaddq_rot90_m_s16 (__inactive, __a, __b, __p);
}
26879
/* Overloaded __arm_vhsubq_m wrappers: dispatch on argument types to the
   type-suffixed predicated halving-subtract intrinsics.  Forms taking
   an element-typed scalar second operand map to the _n_ (vector +
   scalar) variants; vector/vector forms map to the plain variants.
   Per ACLE, lanes predicated off by __p take their value from
   __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_m_n_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_m_n_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_m_n_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_m_n_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_m_n_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_m_n_u16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_m_u16 (__inactive, __a, __b, __p);
}
26963
/* Overloaded __arm_vmaxq_m wrappers: dispatch on argument types to the
   type-suffixed predicated element-wise maximum intrinsics.  Per ACLE,
   lanes predicated off by __p take their value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_m_u16 (__inactive, __a, __b, __p);
}
27005
/* Overloaded __arm_vminq_m wrappers: dispatch on argument types to the
   type-suffixed predicated element-wise minimum intrinsics.  Per ACLE,
   lanes predicated off by __p take their value from __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminq_m_u16 (__inactive, __a, __b, __p);
}
27047
/* Overloaded __arm_vmladavaq_p wrappers: dispatch on argument types to
   the type-suffixed predicated multiply-add-across (accumulating into
   the scalar __a) intrinsics.  Per ACLE, lanes predicated off by __p do
   not contribute to the accumulation.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
{
  return __arm_vmladavaq_p_s8 (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vmladavaq_p_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __arm_vmladavaq_p_s16 (__a, __b, __c, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
{
  return __arm_vmladavaq_p_u8 (__a, __b, __c, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
{
  return __arm_vmladavaq_p_u32 (__a, __b, __c, __p);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaq_p (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
{
  return __arm_vmladavaq_p_u16 (__a, __b, __c, __p);
}
27089
/* Overloaded __arm_vmladavaxq_p wrappers: dispatch on argument types to
   the type-suffixed predicated exchanged multiply-add-across
   intrinsics (signed element types only), accumulating into the scalar
   __a.  Per ACLE, lanes predicated off by __p do not contribute.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
{
  return __arm_vmladavaxq_p_s8 (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vmladavaxq_p_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmladavaxq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __arm_vmladavaxq_p_s16 (__a, __b, __c, __p);
}
27110
/* Overloaded __arm_vmlaq_m wrappers: dispatch on argument types to the
   type-suffixed predicated multiply-accumulate (_n_, vector * scalar
   added to vector) intrinsics.  Per ACLE, lanes predicated off by __p
   are left unchanged from __a.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __arm_vmlaq_m_n_s8 (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __arm_vmlaq_m_n_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __arm_vmlaq_m_n_s16 (__a, __b, __c, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p)
{
  return __arm_vmlaq_m_n_u8 (__a, __b, __c, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p)
{
  return __arm_vmlaq_m_n_u32 (__a, __b, __c, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaq_m (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p)
{
  return __arm_vmlaq_m_n_u16 (__a, __b, __c, __p);
}
27152
/* Overloaded __arm_vmlasq_m wrappers: dispatch on argument types to the
   type-suffixed predicated multiply-accumulate-scalar (_n_) intrinsics.
   Per ACLE, lanes predicated off by __p are left unchanged from __a.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
{
  return __arm_vmlasq_m_n_s8 (__a, __b, __c, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
{
  return __arm_vmlasq_m_n_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
{
  return __arm_vmlasq_m_n_s16 (__a, __b, __c, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p)
{
  return __arm_vmlasq_m_n_u8 (__a, __b, __c, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p)
{
  return __arm_vmlasq_m_n_u32 (__a, __b, __c, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlasq_m (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p)
{
  return __arm_vmlasq_m_n_u16 (__a, __b, __c, __p);
}
27194
/* Overloaded __arm_vmlsdavaq_p wrappers: dispatch on argument types to
   the type-suffixed predicated multiply-subtract-across intrinsics
   (signed element types only), accumulating into the scalar __a.
   Per ACLE, lanes predicated off by __p do not contribute.  */
__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
{
  return __arm_vmlsdavaq_p_s8 (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vmlsdavaq_p_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsdavaq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __arm_vmlsdavaq_p_s16 (__a, __b, __c, __p);
}
27215
/* __arm_vmlsdavaxq_p overload set: scalar int32_t accumulator variants,
   dispatching on the vector argument type to
   __arm_vmlsdavaxq_p_{s8,s32,s16}; __p is the predicate.  */
27216 __extension__ extern __inline int32_t
27217 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27218 __arm_vmlsdavaxq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
27219 {
27220 return __arm_vmlsdavaxq_p_s8 (__a, __b, __c, __p);
27221 }
27222
27223 __extension__ extern __inline int32_t
27224 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27225 __arm_vmlsdavaxq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
27226 {
27227 return __arm_vmlsdavaxq_p_s32 (__a, __b, __c, __p);
27228 }
27229
27230 __extension__ extern __inline int32_t
27231 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27232 __arm_vmlsdavaxq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
27233 {
27234 return __arm_vmlsdavaxq_p_s16 (__a, __b, __c, __p);
27235 }
27236
/* __arm_vmulhq_m overload set: predicated form with an __inactive vector
   supplying the result lanes where the predicate is false (per the
   builtin's _m contract); dispatches to
   __arm_vmulhq_m_{s8,s32,s16,u8,u32,u16} by argument type.  */
27237 __extension__ extern __inline int8x16_t
27238 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27239 __arm_vmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27240 {
27241 return __arm_vmulhq_m_s8 (__inactive, __a, __b, __p);
27242 }
27243
27244 __extension__ extern __inline int32x4_t
27245 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27246 __arm_vmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27247 {
27248 return __arm_vmulhq_m_s32 (__inactive, __a, __b, __p);
27249 }
27250
27251 __extension__ extern __inline int16x8_t
27252 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27253 __arm_vmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27254 {
27255 return __arm_vmulhq_m_s16 (__inactive, __a, __b, __p);
27256 }
27257
27258 __extension__ extern __inline uint8x16_t
27259 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27260 __arm_vmulhq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27261 {
27262 return __arm_vmulhq_m_u8 (__inactive, __a, __b, __p);
27263 }
27264
27265 __extension__ extern __inline uint32x4_t
27266 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27267 __arm_vmulhq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27268 {
27269 return __arm_vmulhq_m_u32 (__inactive, __a, __b, __p);
27270 }
27271
27272 __extension__ extern __inline uint16x8_t
27273 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27274 __arm_vmulhq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27275 {
27276 return __arm_vmulhq_m_u16 (__inactive, __a, __b, __p);
27277 }
27278
/* __arm_vmullbq_int_m overload set: widening forms — note the result and
   __inactive types are double-width relative to the __a/__b operands
   (e.g. int8x16_t inputs produce int16x8_t).  Dispatches to
   __arm_vmullbq_int_m_{s8,s32,s16,u8,u32,u16}.  */
27279 __extension__ extern __inline int16x8_t
27280 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27281 __arm_vmullbq_int_m (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27282 {
27283 return __arm_vmullbq_int_m_s8 (__inactive, __a, __b, __p);
27284 }
27285
27286 __extension__ extern __inline int64x2_t
27287 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27288 __arm_vmullbq_int_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27289 {
27290 return __arm_vmullbq_int_m_s32 (__inactive, __a, __b, __p);
27291 }
27292
27293 __extension__ extern __inline int32x4_t
27294 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27295 __arm_vmullbq_int_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27296 {
27297 return __arm_vmullbq_int_m_s16 (__inactive, __a, __b, __p);
27298 }
27299
27300 __extension__ extern __inline uint16x8_t
27301 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27302 __arm_vmullbq_int_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27303 {
27304 return __arm_vmullbq_int_m_u8 (__inactive, __a, __b, __p);
27305 }
27306
27307 __extension__ extern __inline uint64x2_t
27308 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27309 __arm_vmullbq_int_m (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27310 {
27311 return __arm_vmullbq_int_m_u32 (__inactive, __a, __b, __p);
27312 }
27313
27314 __extension__ extern __inline uint32x4_t
27315 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27316 __arm_vmullbq_int_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27317 {
27318 return __arm_vmullbq_int_m_u16 (__inactive, __a, __b, __p);
27319 }
27320
/* __arm_vmulltq_int_m overload set: widening forms mirroring
   __arm_vmullbq_int_m — double-width result/__inactive types; dispatches
   to __arm_vmulltq_int_m_{s8,s32,s16,u8,u32,u16}.  */
27321 __extension__ extern __inline int16x8_t
27322 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27323 __arm_vmulltq_int_m (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27324 {
27325 return __arm_vmulltq_int_m_s8 (__inactive, __a, __b, __p);
27326 }
27327
27328 __extension__ extern __inline int64x2_t
27329 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27330 __arm_vmulltq_int_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27331 {
27332 return __arm_vmulltq_int_m_s32 (__inactive, __a, __b, __p);
27333 }
27334
27335 __extension__ extern __inline int32x4_t
27336 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27337 __arm_vmulltq_int_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27338 {
27339 return __arm_vmulltq_int_m_s16 (__inactive, __a, __b, __p);
27340 }
27341
27342 __extension__ extern __inline uint16x8_t
27343 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27344 __arm_vmulltq_int_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27345 {
27346 return __arm_vmulltq_int_m_u8 (__inactive, __a, __b, __p);
27347 }
27348
27349 __extension__ extern __inline uint64x2_t
27350 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27351 __arm_vmulltq_int_m (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27352 {
27353 return __arm_vmulltq_int_m_u32 (__inactive, __a, __b, __p);
27354 }
27355
27356 __extension__ extern __inline uint32x4_t
27357 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27358 __arm_vmulltq_int_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27359 {
27360 return __arm_vmulltq_int_m_u16 (__inactive, __a, __b, __p);
27361 }
27362
/* __arm_vmulq_m overload set: six vector-by-scalar overloads (dispatch to
   the _n_ builtins __arm_vmulq_m_n_{s8,s32,s16,u8,u32,u16}) followed by
   six vector-by-vector overloads (dispatch to
   __arm_vmulq_m_{s8,s32,s16,u8,u32,u16}).  Overload resolution on the
   third argument (scalar vs. vector) selects the variant.  */
27363 __extension__ extern __inline int8x16_t
27364 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27365 __arm_vmulq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
27366 {
27367 return __arm_vmulq_m_n_s8 (__inactive, __a, __b, __p);
27368 }
27369
27370 __extension__ extern __inline int32x4_t
27371 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27372 __arm_vmulq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
27373 {
27374 return __arm_vmulq_m_n_s32 (__inactive, __a, __b, __p);
27375 }
27376
27377 __extension__ extern __inline int16x8_t
27378 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27379 __arm_vmulq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
27380 {
27381 return __arm_vmulq_m_n_s16 (__inactive, __a, __b, __p);
27382 }
27383
27384 __extension__ extern __inline uint8x16_t
27385 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27386 __arm_vmulq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
27387 {
27388 return __arm_vmulq_m_n_u8 (__inactive, __a, __b, __p);
27389 }
27390
27391 __extension__ extern __inline uint32x4_t
27392 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27393 __arm_vmulq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
27394 {
27395 return __arm_vmulq_m_n_u32 (__inactive, __a, __b, __p);
27396 }
27397
27398 __extension__ extern __inline uint16x8_t
27399 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27400 __arm_vmulq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
27401 {
27402 return __arm_vmulq_m_n_u16 (__inactive, __a, __b, __p);
27403 }
27404
27405 __extension__ extern __inline int8x16_t
27406 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27407 __arm_vmulq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27408 {
27409 return __arm_vmulq_m_s8 (__inactive, __a, __b, __p);
27410 }
27411
27412 __extension__ extern __inline int32x4_t
27413 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27414 __arm_vmulq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27415 {
27416 return __arm_vmulq_m_s32 (__inactive, __a, __b, __p);
27417 }
27418
27419 __extension__ extern __inline int16x8_t
27420 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27421 __arm_vmulq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27422 {
27423 return __arm_vmulq_m_s16 (__inactive, __a, __b, __p);
27424 }
27425
27426 __extension__ extern __inline uint8x16_t
27427 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27428 __arm_vmulq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27429 {
27430 return __arm_vmulq_m_u8 (__inactive, __a, __b, __p);
27431 }
27432
27433 __extension__ extern __inline uint32x4_t
27434 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27435 __arm_vmulq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27436 {
27437 return __arm_vmulq_m_u32 (__inactive, __a, __b, __p);
27438 }
27439
27440 __extension__ extern __inline uint16x8_t
27441 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27442 __arm_vmulq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27443 {
27444 return __arm_vmulq_m_u16 (__inactive, __a, __b, __p);
27445 }
27446
/* __arm_vornq_m overload set: dispatches by element type to
   __arm_vornq_m_{s8,s32,s16,u8,u32,u16}; __inactive supplies the
   unselected lanes per the builtin's _m contract.  */
27447 __extension__ extern __inline int8x16_t
27448 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27449 __arm_vornq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27450 {
27451 return __arm_vornq_m_s8 (__inactive, __a, __b, __p);
27452 }
27453
27454 __extension__ extern __inline int32x4_t
27455 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27456 __arm_vornq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27457 {
27458 return __arm_vornq_m_s32 (__inactive, __a, __b, __p);
27459 }
27460
27461 __extension__ extern __inline int16x8_t
27462 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27463 __arm_vornq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27464 {
27465 return __arm_vornq_m_s16 (__inactive, __a, __b, __p);
27466 }
27467
27468 __extension__ extern __inline uint8x16_t
27469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27470 __arm_vornq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27471 {
27472 return __arm_vornq_m_u8 (__inactive, __a, __b, __p);
27473 }
27474
27475 __extension__ extern __inline uint32x4_t
27476 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27477 __arm_vornq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27478 {
27479 return __arm_vornq_m_u32 (__inactive, __a, __b, __p);
27480 }
27481
27482 __extension__ extern __inline uint16x8_t
27483 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27484 __arm_vornq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27485 {
27486 return __arm_vornq_m_u16 (__inactive, __a, __b, __p);
27487 }
27488
/* __arm_vorrq_m overload set: dispatches by element type to
   __arm_vorrq_m_{s8,s32,s16,u8,u32,u16}; same shape as the
   __arm_vornq_m wrappers above.  */
27489 __extension__ extern __inline int8x16_t
27490 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27491 __arm_vorrq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27492 {
27493 return __arm_vorrq_m_s8 (__inactive, __a, __b, __p);
27494 }
27495
27496 __extension__ extern __inline int32x4_t
27497 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27498 __arm_vorrq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27499 {
27500 return __arm_vorrq_m_s32 (__inactive, __a, __b, __p);
27501 }
27502
27503 __extension__ extern __inline int16x8_t
27504 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27505 __arm_vorrq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27506 {
27507 return __arm_vorrq_m_s16 (__inactive, __a, __b, __p);
27508 }
27509
27510 __extension__ extern __inline uint8x16_t
27511 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27512 __arm_vorrq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27513 {
27514 return __arm_vorrq_m_u8 (__inactive, __a, __b, __p);
27515 }
27516
27517 __extension__ extern __inline uint32x4_t
27518 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27519 __arm_vorrq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27520 {
27521 return __arm_vorrq_m_u32 (__inactive, __a, __b, __p);
27522 }
27523
27524 __extension__ extern __inline uint16x8_t
27525 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27526 __arm_vorrq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27527 {
27528 return __arm_vorrq_m_u16 (__inactive, __a, __b, __p);
27529 }
27530
/* __arm_vqaddq_m overload set: six vector-by-scalar overloads
   (-> __arm_vqaddq_m_n_{s8,s32,s16,u8,u32,u16}) followed by six
   vector-by-vector overloads (-> __arm_vqaddq_m_{s8,s32,s16,u8,u32,u16}).
   The scalar vs. vector type of __b selects the builtin family.  */
27531 __extension__ extern __inline int8x16_t
27532 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27533 __arm_vqaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
27534 {
27535 return __arm_vqaddq_m_n_s8 (__inactive, __a, __b, __p);
27536 }
27537
27538 __extension__ extern __inline int32x4_t
27539 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27540 __arm_vqaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
27541 {
27542 return __arm_vqaddq_m_n_s32 (__inactive, __a, __b, __p);
27543 }
27544
27545 __extension__ extern __inline int16x8_t
27546 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27547 __arm_vqaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
27548 {
27549 return __arm_vqaddq_m_n_s16 (__inactive, __a, __b, __p);
27550 }
27551
27552 __extension__ extern __inline uint8x16_t
27553 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27554 __arm_vqaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
27555 {
27556 return __arm_vqaddq_m_n_u8 (__inactive, __a, __b, __p);
27557 }
27558
27559 __extension__ extern __inline uint32x4_t
27560 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27561 __arm_vqaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
27562 {
27563 return __arm_vqaddq_m_n_u32 (__inactive, __a, __b, __p);
27564 }
27565
27566 __extension__ extern __inline uint16x8_t
27567 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27568 __arm_vqaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
27569 {
27570 return __arm_vqaddq_m_n_u16 (__inactive, __a, __b, __p);
27571 }
27572
27573 __extension__ extern __inline int8x16_t
27574 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27575 __arm_vqaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27576 {
27577 return __arm_vqaddq_m_s8 (__inactive, __a, __b, __p);
27578 }
27579
27580 __extension__ extern __inline int32x4_t
27581 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27582 __arm_vqaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27583 {
27584 return __arm_vqaddq_m_s32 (__inactive, __a, __b, __p);
27585 }
27586
27587 __extension__ extern __inline int16x8_t
27588 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27589 __arm_vqaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27590 {
27591 return __arm_vqaddq_m_s16 (__inactive, __a, __b, __p);
27592 }
27593
27594 __extension__ extern __inline uint8x16_t
27595 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27596 __arm_vqaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
27597 {
27598 return __arm_vqaddq_m_u8 (__inactive, __a, __b, __p);
27599 }
27600
27601 __extension__ extern __inline uint32x4_t
27602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27603 __arm_vqaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
27604 {
27605 return __arm_vqaddq_m_u32 (__inactive, __a, __b, __p);
27606 }
27607
27608 __extension__ extern __inline uint16x8_t
27609 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27610 __arm_vqaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
27611 {
27612 return __arm_vqaddq_m_u16 (__inactive, __a, __b, __p);
27613 }
27614
/* __arm_vqdmladhq_m overload set (signed types only):
   dispatches to __arm_vqdmladhq_m_{s8,s32,s16}.  */
27615 __extension__ extern __inline int8x16_t
27616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27617 __arm_vqdmladhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27618 {
27619 return __arm_vqdmladhq_m_s8 (__inactive, __a, __b, __p);
27620 }
27621
27622 __extension__ extern __inline int32x4_t
27623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27624 __arm_vqdmladhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27625 {
27626 return __arm_vqdmladhq_m_s32 (__inactive, __a, __b, __p);
27627 }
27628
27629 __extension__ extern __inline int16x8_t
27630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27631 __arm_vqdmladhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27632 {
27633 return __arm_vqdmladhq_m_s16 (__inactive, __a, __b, __p);
27634 }
/* __arm_vqdmladhxq_m overload set (signed types only):
   dispatches to __arm_vqdmladhxq_m_{s8,s32,s16}.  */
27636 __extension__ extern __inline int8x16_t
27637 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27638 __arm_vqdmladhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27639 {
27640 return __arm_vqdmladhxq_m_s8 (__inactive, __a, __b, __p);
27641 }
27642
27643 __extension__ extern __inline int32x4_t
27644 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27645 __arm_vqdmladhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27646 {
27647 return __arm_vqdmladhxq_m_s32 (__inactive, __a, __b, __p);
27648 }
27649
27650 __extension__ extern __inline int16x8_t
27651 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27652 __arm_vqdmladhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27653 {
27654 return __arm_vqdmladhxq_m_s16 (__inactive, __a, __b, __p);
27655 }
27656
/* __arm_vqdmlahq_m overload set (signed, scalar __c operand):
   dispatches to __arm_vqdmlahq_m_n_{s8,s32,s16}.  */
27657 __extension__ extern __inline int8x16_t
27658 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27659 __arm_vqdmlahq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
27660 {
27661 return __arm_vqdmlahq_m_n_s8 (__a, __b, __c, __p);
27662 }
27663
27664 __extension__ extern __inline int32x4_t
27665 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27666 __arm_vqdmlahq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
27667 {
27668 return __arm_vqdmlahq_m_n_s32 (__a, __b, __c, __p);
27669 }
27670
27671 __extension__ extern __inline int16x8_t
27672 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27673 __arm_vqdmlahq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
27674 {
27675 return __arm_vqdmlahq_m_n_s16 (__a, __b, __c, __p);
27676 }
27677
/* __arm_vqdmlsdhq_m overload set (signed types only):
   dispatches to __arm_vqdmlsdhq_m_{s8,s32,s16}.  */
27678 __extension__ extern __inline int8x16_t
27679 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27680 __arm_vqdmlsdhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27681 {
27682 return __arm_vqdmlsdhq_m_s8 (__inactive, __a, __b, __p);
27683 }
27684
27685 __extension__ extern __inline int32x4_t
27686 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27687 __arm_vqdmlsdhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27688 {
27689 return __arm_vqdmlsdhq_m_s32 (__inactive, __a, __b, __p);
27690 }
27691
27692 __extension__ extern __inline int16x8_t
27693 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27694 __arm_vqdmlsdhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27695 {
27696 return __arm_vqdmlsdhq_m_s16 (__inactive, __a, __b, __p);
27697 }
27698
/* __arm_vqdmlsdhxq_m overload set (signed types only):
   dispatches to __arm_vqdmlsdhxq_m_{s8,s32,s16}.  */
27699 __extension__ extern __inline int8x16_t
27700 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27701 __arm_vqdmlsdhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27702 {
27703 return __arm_vqdmlsdhxq_m_s8 (__inactive, __a, __b, __p);
27704 }
27705
27706 __extension__ extern __inline int32x4_t
27707 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27708 __arm_vqdmlsdhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27709 {
27710 return __arm_vqdmlsdhxq_m_s32 (__inactive, __a, __b, __p);
27711 }
27712
27713 __extension__ extern __inline int16x8_t
27714 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27715 __arm_vqdmlsdhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27716 {
27717 return __arm_vqdmlsdhxq_m_s16 (__inactive, __a, __b, __p);
27718 }
27719
/* __arm_vqdmulhq_m overload set (signed only): three vector-by-scalar
   overloads (-> __arm_vqdmulhq_m_n_{s8,s32,s16}) followed by three
   vector-by-vector overloads (-> __arm_vqdmulhq_m_{s8,s32,s16}).  */
27720 __extension__ extern __inline int8x16_t
27721 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27722 __arm_vqdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
27723 {
27724 return __arm_vqdmulhq_m_n_s8 (__inactive, __a, __b, __p);
27725 }
27726
27727 __extension__ extern __inline int32x4_t
27728 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27729 __arm_vqdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
27730 {
27731 return __arm_vqdmulhq_m_n_s32 (__inactive, __a, __b, __p);
27732 }
27733
27734 __extension__ extern __inline int16x8_t
27735 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27736 __arm_vqdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
27737 {
27738 return __arm_vqdmulhq_m_n_s16 (__inactive, __a, __b, __p);
27739 }
27740
27741 __extension__ extern __inline int8x16_t
27742 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27743 __arm_vqdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27744 {
27745 return __arm_vqdmulhq_m_s8 (__inactive, __a, __b, __p);
27746 }
27747
27748 __extension__ extern __inline int32x4_t
27749 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27750 __arm_vqdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27751 {
27752 return __arm_vqdmulhq_m_s32 (__inactive, __a, __b, __p);
27753 }
27754
27755 __extension__ extern __inline int16x8_t
27756 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27757 __arm_vqdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27758 {
27759 return __arm_vqdmulhq_m_s16 (__inactive, __a, __b, __p);
27760 }
27761
/* __arm_vqrdmladhq_m overload set (signed types only):
   dispatches to __arm_vqrdmladhq_m_{s8,s32,s16}.  */
27762 __extension__ extern __inline int8x16_t
27763 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27764 __arm_vqrdmladhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27765 {
27766 return __arm_vqrdmladhq_m_s8 (__inactive, __a, __b, __p);
27767 }
27768
27769 __extension__ extern __inline int32x4_t
27770 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27771 __arm_vqrdmladhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27772 {
27773 return __arm_vqrdmladhq_m_s32 (__inactive, __a, __b, __p);
27774 }
27775
27776 __extension__ extern __inline int16x8_t
27777 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27778 __arm_vqrdmladhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27779 {
27780 return __arm_vqrdmladhq_m_s16 (__inactive, __a, __b, __p);
27781 }
27782
/* __arm_vqrdmladhxq_m overload set (signed types only):
   dispatches to __arm_vqrdmladhxq_m_{s8,s32,s16}.  */
27783 __extension__ extern __inline int8x16_t
27784 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27785 __arm_vqrdmladhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27786 {
27787 return __arm_vqrdmladhxq_m_s8 (__inactive, __a, __b, __p);
27788 }
27789
27790 __extension__ extern __inline int32x4_t
27791 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27792 __arm_vqrdmladhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27793 {
27794 return __arm_vqrdmladhxq_m_s32 (__inactive, __a, __b, __p);
27795 }
27796
27797 __extension__ extern __inline int16x8_t
27798 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27799 __arm_vqrdmladhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27800 {
27801 return __arm_vqrdmladhxq_m_s16 (__inactive, __a, __b, __p);
27802 }
27803
/* __arm_vqrdmlahq_m overload set (signed, scalar __c operand):
   dispatches to __arm_vqrdmlahq_m_n_{s8,s32,s16}.  */
27804 __extension__ extern __inline int8x16_t
27805 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27806 __arm_vqrdmlahq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
27807 {
27808 return __arm_vqrdmlahq_m_n_s8 (__a, __b, __c, __p);
27809 }
27810
27811 __extension__ extern __inline int32x4_t
27812 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27813 __arm_vqrdmlahq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
27814 {
27815 return __arm_vqrdmlahq_m_n_s32 (__a, __b, __c, __p);
27816 }
27817
27818 __extension__ extern __inline int16x8_t
27819 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27820 __arm_vqrdmlahq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
27821 {
27822 return __arm_vqrdmlahq_m_n_s16 (__a, __b, __c, __p);
27823 }
27824
/* __arm_vqrdmlashq_m overload set (signed, scalar __c operand):
   dispatches to __arm_vqrdmlashq_m_n_{s8,s32,s16}.  */
27825 __extension__ extern __inline int8x16_t
27826 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27827 __arm_vqrdmlashq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
27828 {
27829 return __arm_vqrdmlashq_m_n_s8 (__a, __b, __c, __p);
27830 }
27831
27832 __extension__ extern __inline int32x4_t
27833 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27834 __arm_vqrdmlashq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
27835 {
27836 return __arm_vqrdmlashq_m_n_s32 (__a, __b, __c, __p);
27837 }
27838
27839 __extension__ extern __inline int16x8_t
27840 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27841 __arm_vqrdmlashq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
27842 {
27843 return __arm_vqrdmlashq_m_n_s16 (__a, __b, __c, __p);
27844 }
27845
/* __arm_vqdmlashq_m overload set (signed, scalar __c operand):
   dispatches to __arm_vqdmlashq_m_n_{s8,s16,s32}.  Note the overloads
   here appear in s8/s16/s32 order, unlike the s8/s32/s16 order used by
   the neighboring families — harmless, since dispatch is by type.  */
27846 __extension__ extern __inline int8x16_t
27847 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27848 __arm_vqdmlashq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
27849 {
27850 return __arm_vqdmlashq_m_n_s8 (__a, __b, __c, __p);
27851 }
27852
27853 __extension__ extern __inline int16x8_t
27854 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27855 __arm_vqdmlashq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
27856 {
27857 return __arm_vqdmlashq_m_n_s16 (__a, __b, __c, __p);
27858 }
27859
27860 __extension__ extern __inline int32x4_t
27861 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27862 __arm_vqdmlashq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
27863 {
27864 return __arm_vqdmlashq_m_n_s32 (__a, __b, __c, __p);
27865 }
27866
/* __arm_vqrdmlsdhq_m overload set (signed types only):
   dispatches to __arm_vqrdmlsdhq_m_{s8,s32,s16}.  */
27867 __extension__ extern __inline int8x16_t
27868 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27869 __arm_vqrdmlsdhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27870 {
27871 return __arm_vqrdmlsdhq_m_s8 (__inactive, __a, __b, __p);
27872 }
27873
27874 __extension__ extern __inline int32x4_t
27875 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27876 __arm_vqrdmlsdhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27877 {
27878 return __arm_vqrdmlsdhq_m_s32 (__inactive, __a, __b, __p);
27879 }
27880
27881 __extension__ extern __inline int16x8_t
27882 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27883 __arm_vqrdmlsdhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27884 {
27885 return __arm_vqrdmlsdhq_m_s16 (__inactive, __a, __b, __p);
27886 }
27887
/* __arm_vqrdmlsdhxq_m overload set (signed types only):
   dispatches to __arm_vqrdmlsdhxq_m_{s8,s32,s16}.  */
27888 __extension__ extern __inline int8x16_t
27889 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27890 __arm_vqrdmlsdhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27891 {
27892 return __arm_vqrdmlsdhxq_m_s8 (__inactive, __a, __b, __p);
27893 }
27894
27895 __extension__ extern __inline int32x4_t
27896 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27897 __arm_vqrdmlsdhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27898 {
27899 return __arm_vqrdmlsdhxq_m_s32 (__inactive, __a, __b, __p);
27900 }
27901
27902 __extension__ extern __inline int16x8_t
27903 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27904 __arm_vqrdmlsdhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27905 {
27906 return __arm_vqrdmlsdhxq_m_s16 (__inactive, __a, __b, __p);
27907 }
27908
/* __arm_vqrdmulhq_m overload set (signed only): three vector-by-scalar
   overloads (-> __arm_vqrdmulhq_m_n_{s8,s32,s16}) followed by three
   vector-by-vector overloads (-> __arm_vqrdmulhq_m_{s8,s32,s16}).  */
27909 __extension__ extern __inline int8x16_t
27910 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27911 __arm_vqrdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
27912 {
27913 return __arm_vqrdmulhq_m_n_s8 (__inactive, __a, __b, __p);
27914 }
27915
27916 __extension__ extern __inline int32x4_t
27917 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27918 __arm_vqrdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
27919 {
27920 return __arm_vqrdmulhq_m_n_s32 (__inactive, __a, __b, __p);
27921 }
27922
27923 __extension__ extern __inline int16x8_t
27924 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27925 __arm_vqrdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
27926 {
27927 return __arm_vqrdmulhq_m_n_s16 (__inactive, __a, __b, __p);
27928 }
27929
27930 __extension__ extern __inline int8x16_t
27931 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27932 __arm_vqrdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
27933 {
27934 return __arm_vqrdmulhq_m_s8 (__inactive, __a, __b, __p);
27935 }
27936
27937 __extension__ extern __inline int32x4_t
27938 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27939 __arm_vqrdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
27940 {
27941 return __arm_vqrdmulhq_m_s32 (__inactive, __a, __b, __p);
27942 }
27943
27944 __extension__ extern __inline int16x8_t
27945 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27946 __arm_vqrdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
27947 {
27948 return __arm_vqrdmulhq_m_s16 (__inactive, __a, __b, __p);
27949 }
27950
/* Overloads of the predicated vqrshlq intrinsic (per ACLE: saturating
   rounding shift left).  Note the shift-amount vector __b is signed even
   for the unsigned-element overloads.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqrshlq_m_u16 (__inactive, __a, __b, __p);
}
27992
/* Overloads of the predicated vqshlq_n intrinsic (per ACLE: saturating
   shift left by an immediate).  Dispatch is on element type; __imm must
   be a compile-time constant.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshlq_m_n_s8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshlq_m_n_s32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshlq_m_n_s16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshlq_m_n_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshlq_m_n_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m_n (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vqshlq_m_n_u16 (__inactive, __a, __imm, __p);
}
28034
/* Overloads of the predicated vqshlq intrinsic (per ACLE: saturating shift
   left by a per-lane amount).  The shift vector __b is signed for all
   element types.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqshlq_m_u16 (__inactive, __a, __b, __p);
}
28076
/* Overloads of the predicated vqsubq intrinsic (per ACLE: saturating
   subtract).  Scalar __b selects the _n_ variants; vector __b selects the
   vector-vector variants.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_n_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_n_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_n_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_n_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_n_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_n_u16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqsubq_m_u16 (__inactive, __a, __b, __p);
}
28160
/* Overloads of the predicated vrhaddq intrinsic (per ACLE: rounding
   halving add), dispatching on element type.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_m_u16 (__inactive, __a, __b, __p);
}
28202
/* Overloads of the predicated vrmulhq intrinsic (per ACLE: rounding
   multiply returning the high half), dispatching on element type.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_m_u16 (__inactive, __a, __b, __p);
}
28244
/* Overloads of the predicated vrshlq intrinsic (per ACLE: rounding shift
   left by a per-lane amount).  The shift vector __b is signed even for the
   unsigned-element overloads.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_m_u16 (__inactive, __a, __b, __p);
}
28286
/* Overloads of the predicated vrshrq intrinsic (per ACLE: rounding shift
   right by an immediate), dispatching on element type.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_m_n_s8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_m_n_s32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_m_n_s16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_m_n_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_m_n_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_m (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_m_n_u16 (__inactive, __a, __imm, __p);
}
28328
/* Overloads of the predicated vshlq_n intrinsic (per ACLE: shift left by
   an immediate), dispatching on element type.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_m_n_s8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_m_n_s32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_m_n_s16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_m_n_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_m_n_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_m_n (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_m_n_u16 (__inactive, __a, __imm, __p);
}
28370
/* Overloads of the predicated vshrq intrinsic (per ACLE: shift right by an
   immediate), dispatching on element type.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_m_n_s8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_m_n_s32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_m_n_s16 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_m_n_u8 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_m_n_u32 (__inactive, __a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_m (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_m_n_u16 (__inactive, __a, __imm, __p);
}
28412
/* Overloads of the predicated vsliq intrinsic (per ACLE: shift left and
   insert).  Unlike most _m overloads here, the first argument __a is both
   an input and the merge source — there is no separate __inactive.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vsliq_m_n_s8 (__a, __b, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vsliq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vsliq_m_n_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vsliq_m_n_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vsliq_m_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsliq_m (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vsliq_m_n_u16 (__a, __b, __imm, __p);
}
28454
/* Scalar-operand overloads of the predicated vsubq intrinsic: vector minus
   broadcast scalar, forwarded to the _n_ variant for the element type.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_n_s8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_n_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_n_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_n_u8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_n_u32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_m_n_u16 (__inactive, __a, __b, __p);
}
28496
/* Overloads of the predicated vmlaldavaq intrinsic (per ACLE: multiply,
   accumulate long across vector, with 64-bit scalar accumulator __a).  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vmlaldavaq_p_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __arm_vmlaldavaq_p_s16 (__a, __b, __c, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
{
  return __arm_vmlaldavaq_p_u32 (__a, __b, __c, __p);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaq_p (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
{
  return __arm_vmlaldavaq_p_u16 (__a, __b, __c, __p);
}
28524
/* Overloads of the predicated vmlaldavaxq intrinsic (exchanged-pair form of
   vmlaldavaq per ACLE); signed element types only.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vmlaldavaxq_p_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlaldavaxq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __arm_vmlaldavaxq_p_s16 (__a, __b, __c, __p);
}
28538
/* Overloads of the predicated vmlsldavaq intrinsic (multiply-subtract long
   accumulate across vector per ACLE); signed element types only.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vmlsldavaq_p_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __arm_vmlsldavaq_p_s16 (__a, __b, __c, __p);
}
28552
/* Overloads of the predicated vmlsldavaxq intrinsic (exchanged-pair form of
   vmlsldavaq per ACLE); signed element types only.  */
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
{
  return __arm_vmlsldavaxq_p_s32 (__a, __b, __c, __p);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmlsldavaxq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
{
  return __arm_vmlsldavaxq_p_s16 (__a, __b, __c, __p);
}
28566
/* Overloads of the predicated vmullbq_poly intrinsic (polynomial multiply
   long, bottom halves, per ACLE).  The result vector is twice the element
   width of the inputs, hence the widened __inactive type.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_poly_m_p8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_poly_m_p16 (__inactive, __a, __b, __p);
}
28580
/* Overloads of the predicated vmulltq_poly intrinsic (polynomial multiply
   long, top halves, per ACLE).  The result vector is twice the element
   width of the inputs.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_poly_m_p8 (__inactive, __a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_poly_m_p16 (__inactive, __a, __b, __p);
}
28594
/* Overloads of the predicated vqdmullbq intrinsic (per ACLE: saturating
   doubling multiply long, bottom halves).  Scalar __b selects the _n_
   variants; the result is twice the input element width.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_m (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqdmullbq_m_n_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_m (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vqdmullbq_m_n_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqdmullbq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmullbq_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqdmullbq_m_s16 (__inactive, __a, __b, __p);
}
28622
/* Overloads of the predicated vqdmulltq intrinsic (per ACLE: saturating
   doubling multiply long, top halves).  Scalar __b selects the _n_
   variants; the result is twice the input element width.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_m (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vqdmulltq_m_n_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_m (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vqdmulltq_m_n_s16 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vqdmulltq_m_s32 (__inactive, __a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqdmulltq_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vqdmulltq_m_s16 (__inactive, __a, __b, __p);
}
28650
/* Overloads of the predicated vqrshrnbq intrinsic (per ACLE: saturating
   rounding shift right narrow, writing the bottom halves of __a).  The
   narrow destination __a is half the element width of the source __b.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrnbq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrnbq_m_n_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrnbq_m_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrnbq_m_n_u16 (__a, __b, __imm, __p);
}
28678
/* Overloads of the predicated vqrshrntq intrinsic (per ACLE: saturating
   rounding shift right narrow, writing the top halves of __a).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrntq_m_n_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrntq_m_n_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrntq_m_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vqrshrntq_m_n_u16 (__a, __b, __imm, __p);
}
28706
28707 __extension__ extern __inline uint16x8_t
28708 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28709 __arm_vqrshrunbq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28710 {
28711 return __arm_vqrshrunbq_m_n_s32 (__a, __b, __imm, __p);
28712 }
28713
28714 __extension__ extern __inline uint8x16_t
28715 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28716 __arm_vqrshrunbq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28717 {
28718 return __arm_vqrshrunbq_m_n_s16 (__a, __b, __imm, __p);
28719 }
28720
28721 __extension__ extern __inline uint16x8_t
28722 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28723 __arm_vqrshruntq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28724 {
28725 return __arm_vqrshruntq_m_n_s32 (__a, __b, __imm, __p);
28726 }
28727
28728 __extension__ extern __inline uint8x16_t
28729 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28730 __arm_vqrshruntq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28731 {
28732 return __arm_vqrshruntq_m_n_s16 (__a, __b, __imm, __p);
28733 }
28734
/* C++ overload wrappers for the predicated (merging) saturating
   (non-rounding) narrowing shifts __arm_vqshrnbq_m, __arm_vqshrntq_m and
   the unsigned-saturating variants __arm_vqshrunbq_m / __arm_vqshruntq_m;
   each forwards to the type-suffixed _n_[su]{16,32} intrinsic.  */
28735 __extension__ extern __inline int16x8_t
28736 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28737 __arm_vqshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28738 {
28739 return __arm_vqshrnbq_m_n_s32 (__a, __b, __imm, __p);
28740 }
28741
28742 __extension__ extern __inline int8x16_t
28743 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28744 __arm_vqshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28745 {
28746 return __arm_vqshrnbq_m_n_s16 (__a, __b, __imm, __p);
28747 }
28748
28749 __extension__ extern __inline uint16x8_t
28750 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28751 __arm_vqshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
28752 {
28753 return __arm_vqshrnbq_m_n_u32 (__a, __b, __imm, __p);
28754 }
28755
28756 __extension__ extern __inline uint8x16_t
28757 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28758 __arm_vqshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
28759 {
28760 return __arm_vqshrnbq_m_n_u16 (__a, __b, __imm, __p);
28761 }
28762
28763 __extension__ extern __inline int16x8_t
28764 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28765 __arm_vqshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28766 {
28767 return __arm_vqshrntq_m_n_s32 (__a, __b, __imm, __p);
28768 }
28769
28770 __extension__ extern __inline int8x16_t
28771 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28772 __arm_vqshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28773 {
28774 return __arm_vqshrntq_m_n_s16 (__a, __b, __imm, __p);
28775 }
28776
28777 __extension__ extern __inline uint16x8_t
28778 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28779 __arm_vqshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
28780 {
28781 return __arm_vqshrntq_m_n_u32 (__a, __b, __imm, __p);
28782 }
28783
28784 __extension__ extern __inline uint8x16_t
28785 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28786 __arm_vqshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
28787 {
28788 return __arm_vqshrntq_m_n_u16 (__a, __b, __imm, __p);
28789 }
28790
28791 __extension__ extern __inline uint16x8_t
28792 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28793 __arm_vqshrunbq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28794 {
28795 return __arm_vqshrunbq_m_n_s32 (__a, __b, __imm, __p);
28796 }
28797
28798 __extension__ extern __inline uint8x16_t
28799 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28800 __arm_vqshrunbq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28801 {
28802 return __arm_vqshrunbq_m_n_s16 (__a, __b, __imm, __p);
28803 }
28804
28805 __extension__ extern __inline uint16x8_t
28806 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28807 __arm_vqshruntq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28808 {
28809 return __arm_vqshruntq_m_n_s32 (__a, __b, __imm, __p);
28810 }
28811
28812 __extension__ extern __inline uint8x16_t
28813 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28814 __arm_vqshruntq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28815 {
28816 return __arm_vqshruntq_m_n_s16 (__a, __b, __imm, __p);
28817 }
28818
/* C++ overload wrappers for the predicated rounding long multiply-
   accumulate reductions __arm_vrmlaldavhaq_p, __arm_vrmlaldavhaxq_p,
   __arm_vrmlsldavhaq_p and __arm_vrmlsldavhaxq_p; each forwards to the
   [su]32-suffixed intrinsic (only vrmlaldavhaq_p has an unsigned form).  */
28819 __extension__ extern __inline int64_t
28820 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28821 __arm_vrmlaldavhaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
28822 {
28823 return __arm_vrmlaldavhaq_p_s32 (__a, __b, __c, __p);
28824 }
28825
28826 __extension__ extern __inline uint64_t
28827 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28828 __arm_vrmlaldavhaq_p (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
28829 {
28830 return __arm_vrmlaldavhaq_p_u32 (__a, __b, __c, __p);
28831 }
28832
28833 __extension__ extern __inline int64_t
28834 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28835 __arm_vrmlaldavhaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
28836 {
28837 return __arm_vrmlaldavhaxq_p_s32 (__a, __b, __c, __p);
28838 }
28839
28840 __extension__ extern __inline int64_t
28841 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28842 __arm_vrmlsldavhaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
28843 {
28844 return __arm_vrmlsldavhaq_p_s32 (__a, __b, __c, __p);
28845 }
28846
28847 __extension__ extern __inline int64_t
28848 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28849 __arm_vrmlsldavhaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
28850 {
28851 return __arm_vrmlsldavhaxq_p_s32 (__a, __b, __c, __p);
28852 }
28853
/* C++ overload wrappers for the predicated (merging) rounding narrowing
   shifts __arm_vrshrnbq_m (bottom) and __arm_vrshrntq_m (top); each
   forwards to the type-suffixed _n_[su]{16,32} intrinsic.  */
28854 __extension__ extern __inline int16x8_t
28855 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28856 __arm_vrshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28857 {
28858 return __arm_vrshrnbq_m_n_s32 (__a, __b, __imm, __p);
28859 }
28860
28861 __extension__ extern __inline int8x16_t
28862 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28863 __arm_vrshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28864 {
28865 return __arm_vrshrnbq_m_n_s16 (__a, __b, __imm, __p);
28866 }
28867
28868 __extension__ extern __inline uint16x8_t
28869 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28870 __arm_vrshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
28871 {
28872 return __arm_vrshrnbq_m_n_u32 (__a, __b, __imm, __p);
28873 }
28874
28875 __extension__ extern __inline uint8x16_t
28876 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28877 __arm_vrshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
28878 {
28879 return __arm_vrshrnbq_m_n_u16 (__a, __b, __imm, __p);
28880 }
28881
28882 __extension__ extern __inline int16x8_t
28883 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28884 __arm_vrshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28885 {
28886 return __arm_vrshrntq_m_n_s32 (__a, __b, __imm, __p);
28887 }
28888
28889 __extension__ extern __inline int8x16_t
28890 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28891 __arm_vrshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28892 {
28893 return __arm_vrshrntq_m_n_s16 (__a, __b, __imm, __p);
28894 }
28895
28896 __extension__ extern __inline uint16x8_t
28897 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28898 __arm_vrshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
28899 {
28900 return __arm_vrshrntq_m_n_u32 (__a, __b, __imm, __p);
28901 }
28902
28903 __extension__ extern __inline uint8x16_t
28904 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28905 __arm_vrshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
28906 {
28907 return __arm_vrshrntq_m_n_u16 (__a, __b, __imm, __p);
28908 }
28909
/* C++ overload wrappers for the predicated (merging) widening long shifts
   __arm_vshllbq_m (bottom half) and __arm_vshlltq_m (top half); each takes
   an __inactive vector for the false-predicated lanes and forwards to the
   type-suffixed _n_[su]{8,16} intrinsic.  */
28910 __extension__ extern __inline int16x8_t
28911 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28912 __arm_vshllbq_m (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
28913 {
28914 return __arm_vshllbq_m_n_s8 (__inactive, __a, __imm, __p);
28915 }
28916
28917 __extension__ extern __inline int32x4_t
28918 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28919 __arm_vshllbq_m (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
28920 {
28921 return __arm_vshllbq_m_n_s16 (__inactive, __a, __imm, __p);
28922 }
28923
28924 __extension__ extern __inline uint16x8_t
28925 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28926 __arm_vshllbq_m (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
28927 {
28928 return __arm_vshllbq_m_n_u8 (__inactive, __a, __imm, __p);
28929 }
28930
28931 __extension__ extern __inline uint32x4_t
28932 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28933 __arm_vshllbq_m (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
28934 {
28935 return __arm_vshllbq_m_n_u16 (__inactive, __a, __imm, __p);
28936 }
28937
28938 __extension__ extern __inline int16x8_t
28939 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28940 __arm_vshlltq_m (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
28941 {
28942 return __arm_vshlltq_m_n_s8 (__inactive, __a, __imm, __p);
28943 }
28944
28945 __extension__ extern __inline int32x4_t
28946 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28947 __arm_vshlltq_m (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
28948 {
28949 return __arm_vshlltq_m_n_s16 (__inactive, __a, __imm, __p);
28950 }
28951
28952 __extension__ extern __inline uint16x8_t
28953 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28954 __arm_vshlltq_m (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
28955 {
28956 return __arm_vshlltq_m_n_u8 (__inactive, __a, __imm, __p);
28957 }
28958
28959 __extension__ extern __inline uint32x4_t
28960 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28961 __arm_vshlltq_m (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
28962 {
28963 return __arm_vshlltq_m_n_u16 (__inactive, __a, __imm, __p);
28964 }
28965
/* C++ overload wrappers for the predicated (merging) plain narrowing
   shifts __arm_vshrnbq_m (bottom) and __arm_vshrntq_m (top); each forwards
   to the type-suffixed _n_[su]{16,32} intrinsic.  */
28966 __extension__ extern __inline int16x8_t
28967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28968 __arm_vshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28969 {
28970 return __arm_vshrnbq_m_n_s32 (__a, __b, __imm, __p);
28971 }
28972
28973 __extension__ extern __inline int8x16_t
28974 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28975 __arm_vshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
28976 {
28977 return __arm_vshrnbq_m_n_s16 (__a, __b, __imm, __p);
28978 }
28979
28980 __extension__ extern __inline uint16x8_t
28981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28982 __arm_vshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
28983 {
28984 return __arm_vshrnbq_m_n_u32 (__a, __b, __imm, __p);
28985 }
28986
28987 __extension__ extern __inline uint8x16_t
28988 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28989 __arm_vshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
28990 {
28991 return __arm_vshrnbq_m_n_u16 (__a, __b, __imm, __p);
28992 }
28993
28994 __extension__ extern __inline int16x8_t
28995 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28996 __arm_vshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
28997 {
28998 return __arm_vshrntq_m_n_s32 (__a, __b, __imm, __p);
28999 }
29000
29001 __extension__ extern __inline int8x16_t
29002 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29003 __arm_vshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
29004 {
29005 return __arm_vshrntq_m_n_s16 (__a, __b, __imm, __p);
29006 }
29007
29008 __extension__ extern __inline uint16x8_t
29009 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29010 __arm_vshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
29011 {
29012 return __arm_vshrntq_m_n_u32 (__a, __b, __imm, __p);
29013 }
29014
29015 __extension__ extern __inline uint8x16_t
29016 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29017 __arm_vshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
29018 {
29019 return __arm_vshrntq_m_n_u16 (__a, __b, __imm, __p);
29020 }
29021
/* C++ overload wrappers for __arm_vstrbq_scatter_offset: byte scatter
   stores with per-lane byte offsets; the element type of __value and
   __offset selects the [su]{8,16,32}-suffixed intrinsic.  */
29022 __extension__ extern __inline void
29023 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29024 __arm_vstrbq_scatter_offset (int8_t * __base, uint8x16_t __offset, int8x16_t __value)
29025 {
29026 __arm_vstrbq_scatter_offset_s8 (__base, __offset, __value);
29027 }
29028
29029 __extension__ extern __inline void
29030 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29031 __arm_vstrbq_scatter_offset (int8_t * __base, uint32x4_t __offset, int32x4_t __value)
29032 {
29033 __arm_vstrbq_scatter_offset_s32 (__base, __offset, __value);
29034 }
29035
29036 __extension__ extern __inline void
29037 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29038 __arm_vstrbq_scatter_offset (int8_t * __base, uint16x8_t __offset, int16x8_t __value)
29039 {
29040 __arm_vstrbq_scatter_offset_s16 (__base, __offset, __value);
29041 }
29042
29043 __extension__ extern __inline void
29044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29045 __arm_vstrbq_scatter_offset (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value)
29046 {
29047 __arm_vstrbq_scatter_offset_u8 (__base, __offset, __value);
29048 }
29049
29050 __extension__ extern __inline void
29051 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29052 __arm_vstrbq_scatter_offset (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value)
29053 {
29054 __arm_vstrbq_scatter_offset_u32 (__base, __offset, __value);
29055 }
29056
29057 __extension__ extern __inline void
29058 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29059 __arm_vstrbq_scatter_offset (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value)
29060 {
29061 __arm_vstrbq_scatter_offset_u16 (__base, __offset, __value);
29062 }
29063
/* C++ overload wrappers for __arm_vstrbq: contiguous byte stores
   (narrowing for the 16/32-bit element forms); the element type of
   __value selects the [su]{8,16,32}-suffixed intrinsic.  */
29064 __extension__ extern __inline void
29065 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29066 __arm_vstrbq (int8_t * __addr, int8x16_t __value)
29067 {
29068 __arm_vstrbq_s8 (__addr, __value);
29069 }
29070
29071 __extension__ extern __inline void
29072 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29073 __arm_vstrbq (int8_t * __addr, int32x4_t __value)
29074 {
29075 __arm_vstrbq_s32 (__addr, __value);
29076 }
29077
29078 __extension__ extern __inline void
29079 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29080 __arm_vstrbq (int8_t * __addr, int16x8_t __value)
29081 {
29082 __arm_vstrbq_s16 (__addr, __value);
29083 }
29084
29085 __extension__ extern __inline void
29086 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29087 __arm_vstrbq (uint8_t * __addr, uint8x16_t __value)
29088 {
29089 __arm_vstrbq_u8 (__addr, __value);
29090 }
29091
29092 __extension__ extern __inline void
29093 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29094 __arm_vstrbq (uint8_t * __addr, uint32x4_t __value)
29095 {
29096 __arm_vstrbq_u32 (__addr, __value);
29097 }
29098
29099 __extension__ extern __inline void
29100 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29101 __arm_vstrbq (uint8_t * __addr, uint16x8_t __value)
29102 {
29103 __arm_vstrbq_u16 (__addr, __value);
29104 }
29105
/* C++ overload wrappers for __arm_vstrwq_scatter_base: word scatter stores
   to a vector of base addresses plus an immediate offset; the signedness
   of __value selects the [su]32-suffixed intrinsic.  */
29106 __extension__ extern __inline void
29107 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29108 __arm_vstrwq_scatter_base (uint32x4_t __addr, const int __offset, int32x4_t __value)
29109 {
29110 __arm_vstrwq_scatter_base_s32 (__addr, __offset, __value);
29111 }
29112
29113 __extension__ extern __inline void
29114 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29115 __arm_vstrwq_scatter_base (uint32x4_t __addr, const int __offset, uint32x4_t __value)
29116 {
29117 __arm_vstrwq_scatter_base_u32 (__addr, __offset, __value);
29118 }
29119
/* C++ overload wrappers for __arm_vldrbq_gather_offset: byte gather loads
   with per-lane byte offsets (widening for the 16/32-bit element forms);
   the pointee signedness and __offset width select the
   [su]{8,16,32}-suffixed intrinsic.  */
29120 __extension__ extern __inline uint8x16_t
29121 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29122 __arm_vldrbq_gather_offset (uint8_t const * __base, uint8x16_t __offset)
29123 {
29124 return __arm_vldrbq_gather_offset_u8 (__base, __offset);
29125 }
29126
29127 __extension__ extern __inline int8x16_t
29128 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29129 __arm_vldrbq_gather_offset (int8_t const * __base, uint8x16_t __offset)
29130 {
29131 return __arm_vldrbq_gather_offset_s8 (__base, __offset);
29132 }
29133
29134 __extension__ extern __inline uint16x8_t
29135 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29136 __arm_vldrbq_gather_offset (uint8_t const * __base, uint16x8_t __offset)
29137 {
29138 return __arm_vldrbq_gather_offset_u16 (__base, __offset);
29139 }
29140
29141 __extension__ extern __inline int16x8_t
29142 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29143 __arm_vldrbq_gather_offset (int8_t const * __base, uint16x8_t __offset)
29144 {
29145 return __arm_vldrbq_gather_offset_s16 (__base, __offset);
29146 }
29147
29148 __extension__ extern __inline uint32x4_t
29149 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29150 __arm_vldrbq_gather_offset (uint8_t const * __base, uint32x4_t __offset)
29151 {
29152 return __arm_vldrbq_gather_offset_u32 (__base, __offset);
29153 }
29154
29155 __extension__ extern __inline int32x4_t
29156 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29157 __arm_vldrbq_gather_offset (int8_t const * __base, uint32x4_t __offset)
29158 {
29159 return __arm_vldrbq_gather_offset_s32 (__base, __offset);
29160 }
29161
/* C++ overload wrappers for __arm_vstrbq_p: predicated contiguous byte
   stores (only lanes with their predicate bit set are written); the
   element type of __value selects the [su]{8,16,32}-suffixed intrinsic.  */
29162 __extension__ extern __inline void
29163 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29164 __arm_vstrbq_p (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
29165 {
29166 __arm_vstrbq_p_s8 (__addr, __value, __p);
29167 }
29168
29169 __extension__ extern __inline void
29170 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29171 __arm_vstrbq_p (int8_t * __addr, int32x4_t __value, mve_pred16_t __p)
29172 {
29173 __arm_vstrbq_p_s32 (__addr, __value, __p);
29174 }
29175
29176 __extension__ extern __inline void
29177 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29178 __arm_vstrbq_p (int8_t * __addr, int16x8_t __value, mve_pred16_t __p)
29179 {
29180 __arm_vstrbq_p_s16 (__addr, __value, __p);
29181 }
29182
29183 __extension__ extern __inline void
29184 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29185 __arm_vstrbq_p (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
29186 {
29187 __arm_vstrbq_p_u8 (__addr, __value, __p);
29188 }
29189
29190 __extension__ extern __inline void
29191 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29192 __arm_vstrbq_p (uint8_t * __addr, uint32x4_t __value, mve_pred16_t __p)
29193 {
29194 __arm_vstrbq_p_u32 (__addr, __value, __p);
29195 }
29196
29197 __extension__ extern __inline void
29198 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29199 __arm_vstrbq_p (uint8_t * __addr, uint16x8_t __value, mve_pred16_t __p)
29200 {
29201 __arm_vstrbq_p_u16 (__addr, __value, __p);
29202 }
29203
/* C++ overload wrappers for __arm_vstrbq_scatter_offset_p: predicated
   byte scatter stores with per-lane byte offsets; the element type of
   __value and __offset selects the [su]{8,16,32}-suffixed intrinsic.  */
29204 __extension__ extern __inline void
29205 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29206 __arm_vstrbq_scatter_offset_p (int8_t * __base, uint8x16_t __offset, int8x16_t __value, mve_pred16_t __p)
29207 {
29208 __arm_vstrbq_scatter_offset_p_s8 (__base, __offset, __value, __p);
29209 }
29210
29211 __extension__ extern __inline void
29212 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29213 __arm_vstrbq_scatter_offset_p (int8_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
29214 {
29215 __arm_vstrbq_scatter_offset_p_s32 (__base, __offset, __value, __p);
29216 }
29217
29218 __extension__ extern __inline void
29219 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29220 __arm_vstrbq_scatter_offset_p (int8_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
29221 {
29222 __arm_vstrbq_scatter_offset_p_s16 (__base, __offset, __value, __p);
29223 }
29224
29225 __extension__ extern __inline void
29226 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29227 __arm_vstrbq_scatter_offset_p (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value, mve_pred16_t __p)
29228 {
29229 __arm_vstrbq_scatter_offset_p_u8 (__base, __offset, __value, __p);
29230 }
29231
29232 __extension__ extern __inline void
29233 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29234 __arm_vstrbq_scatter_offset_p (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
29235 {
29236 __arm_vstrbq_scatter_offset_p_u32 (__base, __offset, __value, __p);
29237 }
29238
29239 __extension__ extern __inline void
29240 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29241 __arm_vstrbq_scatter_offset_p (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
29242 {
29243 __arm_vstrbq_scatter_offset_p_u16 (__base, __offset, __value, __p);
29244 }
29245
/* C++ overload wrappers for __arm_vstrwq_scatter_base_p: predicated word
   scatter stores to a vector of base addresses plus an immediate offset;
   the signedness of __value selects the [su]32-suffixed intrinsic.  */
29246 __extension__ extern __inline void
29247 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29248 __arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
29249 {
29250 __arm_vstrwq_scatter_base_p_s32 (__addr, __offset, __value, __p);
29251 }
29252
29253 __extension__ extern __inline void
29254 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29255 __arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
29256 {
29257 __arm_vstrwq_scatter_base_p_u32 (__addr, __offset, __value, __p);
29258 }
29259
/* C++ overload wrappers for __arm_vldrbq_gather_offset_z: predicated
   (zeroing) byte gather loads with per-lane byte offsets; the pointee
   signedness and __offset width select the [su]{8,16,32}-suffixed
   intrinsic.  */
29260 __extension__ extern __inline int8x16_t
29261 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29262 __arm_vldrbq_gather_offset_z (int8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
29263 {
29264 return __arm_vldrbq_gather_offset_z_s8 (__base, __offset, __p);
29265 }
29266
29267 __extension__ extern __inline int32x4_t
29268 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29269 __arm_vldrbq_gather_offset_z (int8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29270 {
29271 return __arm_vldrbq_gather_offset_z_s32 (__base, __offset, __p);
29272 }
29273
29274 __extension__ extern __inline int16x8_t
29275 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29276 __arm_vldrbq_gather_offset_z (int8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
29277 {
29278 return __arm_vldrbq_gather_offset_z_s16 (__base, __offset, __p);
29279 }
29280
29281 __extension__ extern __inline uint8x16_t
29282 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29283 __arm_vldrbq_gather_offset_z (uint8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
29284 {
29285 return __arm_vldrbq_gather_offset_z_u8 (__base, __offset, __p);
29286 }
29287
29288 __extension__ extern __inline uint32x4_t
29289 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29290 __arm_vldrbq_gather_offset_z (uint8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29291 {
29292 return __arm_vldrbq_gather_offset_z_u32 (__base, __offset, __p);
29293 }
29294
29295 __extension__ extern __inline uint16x8_t
29296 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29297 __arm_vldrbq_gather_offset_z (uint8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
29298 {
29299 return __arm_vldrbq_gather_offset_z_u16 (__base, __offset, __p);
29300 }
29301
/* C++ overload wrappers for __arm_vld1q: contiguous vector loads; the
   pointee type selects the [su]{8,16,32}-suffixed intrinsic.  */
29302 __extension__ extern __inline int8x16_t
29303 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29304 __arm_vld1q (int8_t const * __base)
29305 {
29306 return __arm_vld1q_s8 (__base);
29307 }
29308
29309 __extension__ extern __inline int32x4_t
29310 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29311 __arm_vld1q (int32_t const * __base)
29312 {
29313 return __arm_vld1q_s32 (__base);
29314 }
29315
29316 __extension__ extern __inline int16x8_t
29317 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29318 __arm_vld1q (int16_t const * __base)
29319 {
29320 return __arm_vld1q_s16 (__base);
29321 }
29322
29323 __extension__ extern __inline uint8x16_t
29324 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29325 __arm_vld1q (uint8_t const * __base)
29326 {
29327 return __arm_vld1q_u8 (__base);
29328 }
29329
29330 __extension__ extern __inline uint32x4_t
29331 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29332 __arm_vld1q (uint32_t const * __base)
29333 {
29334 return __arm_vld1q_u32 (__base);
29335 }
29336
29337 __extension__ extern __inline uint16x8_t
29338 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29339 __arm_vld1q (uint16_t const * __base)
29340 {
29341 return __arm_vld1q_u16 (__base);
29342 }
29343
/* C++ overload wrappers for __arm_vldrhq_gather_offset and its predicated
   (zeroing) form __arm_vldrhq_gather_offset_z: halfword gather loads with
   per-lane byte offsets (widening for the 32-bit element forms); the
   pointee signedness and __offset width select the [su]{16,32}-suffixed
   intrinsic.  */
29344 __extension__ extern __inline int32x4_t
29345 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29346 __arm_vldrhq_gather_offset (int16_t const * __base, uint32x4_t __offset)
29347 {
29348 return __arm_vldrhq_gather_offset_s32 (__base, __offset);
29349 }
29350
29351 __extension__ extern __inline int16x8_t
29352 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29353 __arm_vldrhq_gather_offset (int16_t const * __base, uint16x8_t __offset)
29354 {
29355 return __arm_vldrhq_gather_offset_s16 (__base, __offset);
29356 }
29357
29358 __extension__ extern __inline uint32x4_t
29359 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29360 __arm_vldrhq_gather_offset (uint16_t const * __base, uint32x4_t __offset)
29361 {
29362 return __arm_vldrhq_gather_offset_u32 (__base, __offset);
29363 }
29364
29365 __extension__ extern __inline uint16x8_t
29366 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29367 __arm_vldrhq_gather_offset (uint16_t const * __base, uint16x8_t __offset)
29368 {
29369 return __arm_vldrhq_gather_offset_u16 (__base, __offset);
29370 }
29371
29372 __extension__ extern __inline int32x4_t
29373 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29374 __arm_vldrhq_gather_offset_z (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29375 {
29376 return __arm_vldrhq_gather_offset_z_s32 (__base, __offset, __p);
29377 }
29378
29379 __extension__ extern __inline int16x8_t
29380 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29381 __arm_vldrhq_gather_offset_z (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
29382 {
29383 return __arm_vldrhq_gather_offset_z_s16 (__base, __offset, __p);
29384 }
29385
29386 __extension__ extern __inline uint32x4_t
29387 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29388 __arm_vldrhq_gather_offset_z (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29389 {
29390 return __arm_vldrhq_gather_offset_z_u32 (__base, __offset, __p);
29391 }
29392
29393 __extension__ extern __inline uint16x8_t
29394 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29395 __arm_vldrhq_gather_offset_z (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
29396 {
29397 return __arm_vldrhq_gather_offset_z_u16 (__base, __offset, __p);
29398 }
29399
/* C++ overload wrappers for __arm_vldrhq_gather_shifted_offset and its
   predicated (zeroing) form __arm_vldrhq_gather_shifted_offset_z:
   halfword gather loads with per-lane offsets scaled by the element size;
   the pointee signedness and __offset width select the
   [su]{16,32}-suffixed intrinsic.  */
29400 __extension__ extern __inline int32x4_t
29401 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29402 __arm_vldrhq_gather_shifted_offset (int16_t const * __base, uint32x4_t __offset)
29403 {
29404 return __arm_vldrhq_gather_shifted_offset_s32 (__base, __offset);
29405 }
29406
29407 __extension__ extern __inline int16x8_t
29408 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29409 __arm_vldrhq_gather_shifted_offset (int16_t const * __base, uint16x8_t __offset)
29410 {
29411 return __arm_vldrhq_gather_shifted_offset_s16 (__base, __offset);
29412 }
29413
29414 __extension__ extern __inline uint32x4_t
29415 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29416 __arm_vldrhq_gather_shifted_offset (uint16_t const * __base, uint32x4_t __offset)
29417 {
29418 return __arm_vldrhq_gather_shifted_offset_u32 (__base, __offset);
29419 }
29420
29421 __extension__ extern __inline uint16x8_t
29422 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29423 __arm_vldrhq_gather_shifted_offset (uint16_t const * __base, uint16x8_t __offset)
29424 {
29425 return __arm_vldrhq_gather_shifted_offset_u16 (__base, __offset);
29426 }
29427
29428 __extension__ extern __inline int32x4_t
29429 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29430 __arm_vldrhq_gather_shifted_offset_z (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29431 {
29432 return __arm_vldrhq_gather_shifted_offset_z_s32 (__base, __offset, __p);
29433 }
29434
29435 __extension__ extern __inline int16x8_t
29436 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29437 __arm_vldrhq_gather_shifted_offset_z (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
29438 {
29439 return __arm_vldrhq_gather_shifted_offset_z_s16 (__base, __offset, __p);
29440 }
29441
29442 __extension__ extern __inline uint32x4_t
29443 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29444 __arm_vldrhq_gather_shifted_offset_z (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29445 {
29446 return __arm_vldrhq_gather_shifted_offset_z_u32 (__base, __offset, __p);
29447 }
29448
29449 __extension__ extern __inline uint16x8_t
29450 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29451 __arm_vldrhq_gather_shifted_offset_z (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
29452 {
29453 return __arm_vldrhq_gather_shifted_offset_z_u16 (__base, __offset, __p);
29454 }
29455
/* Polymorphic overloads of vldrdq_gather_offset (and its predicated
   _z variant): gather load of doubleword elements from __base plus
   byte offsets in __offset; _z zeroes lanes disabled by __p.  */
29456 __extension__ extern __inline int64x2_t
29457 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29458 __arm_vldrdq_gather_offset (int64_t const * __base, uint64x2_t __offset)
29459 {
29460 return __arm_vldrdq_gather_offset_s64 (__base, __offset);
29461 }
29462
29463 __extension__ extern __inline uint64x2_t
29464 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29465 __arm_vldrdq_gather_offset (uint64_t const * __base, uint64x2_t __offset)
29466 {
29467 return __arm_vldrdq_gather_offset_u64 (__base, __offset);
29468 }
29469
/* Predicated forms.  */
29470 __extension__ extern __inline int64x2_t
29471 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29472 __arm_vldrdq_gather_offset_z (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
29473 {
29474 return __arm_vldrdq_gather_offset_z_s64 (__base, __offset, __p);
29475 }
29476
29477 __extension__ extern __inline uint64x2_t
29478 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29479 __arm_vldrdq_gather_offset_z (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
29480 {
29481 return __arm_vldrdq_gather_offset_z_u64 (__base, __offset, __p);
29482 }
29483
/* Polymorphic overloads of vldrdq_gather_shifted_offset (and _z):
   gather load of doubleword elements with element-scaled offsets;
   each wrapper forwards to the type-suffixed implementation.  */
29484 __extension__ extern __inline int64x2_t
29485 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29486 __arm_vldrdq_gather_shifted_offset (int64_t const * __base, uint64x2_t __offset)
29487 {
29488 return __arm_vldrdq_gather_shifted_offset_s64 (__base, __offset);
29489 }
29490
29491 __extension__ extern __inline uint64x2_t
29492 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29493 __arm_vldrdq_gather_shifted_offset (uint64_t const * __base, uint64x2_t __offset)
29494 {
29495 return __arm_vldrdq_gather_shifted_offset_u64 (__base, __offset);
29496 }
29497
/* Predicated forms.  */
29498 __extension__ extern __inline int64x2_t
29499 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29500 __arm_vldrdq_gather_shifted_offset_z (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
29501 {
29502 return __arm_vldrdq_gather_shifted_offset_z_s64 (__base, __offset, __p);
29503 }
29504
29505 __extension__ extern __inline uint64x2_t
29506 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29507 __arm_vldrdq_gather_shifted_offset_z (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
29508 {
29509 return __arm_vldrdq_gather_shifted_offset_z_u64 (__base, __offset, __p);
29510 }
29511
/* Polymorphic overloads of vldrwq_gather_offset (and _z): gather load
   of word elements from __base plus byte offsets in __offset.  */
29512 __extension__ extern __inline int32x4_t
29513 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29514 __arm_vldrwq_gather_offset (int32_t const * __base, uint32x4_t __offset)
29515 {
29516 return __arm_vldrwq_gather_offset_s32 (__base, __offset);
29517 }
29518
29519 __extension__ extern __inline uint32x4_t
29520 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29521 __arm_vldrwq_gather_offset (uint32_t const * __base, uint32x4_t __offset)
29522 {
29523 return __arm_vldrwq_gather_offset_u32 (__base, __offset);
29524 }
29525
/* Predicated forms.  */
29526 __extension__ extern __inline int32x4_t
29527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29528 __arm_vldrwq_gather_offset_z (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29529 {
29530 return __arm_vldrwq_gather_offset_z_s32 (__base, __offset, __p);
29531 }
29532
29533 __extension__ extern __inline uint32x4_t
29534 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29535 __arm_vldrwq_gather_offset_z (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29536 {
29537 return __arm_vldrwq_gather_offset_z_u32 (__base, __offset, __p);
29538 }
29539
/* Polymorphic overloads of vldrwq_gather_shifted_offset (and _z):
   gather load of word elements with element-scaled offsets.  */
29540 __extension__ extern __inline int32x4_t
29541 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29542 __arm_vldrwq_gather_shifted_offset (int32_t const * __base, uint32x4_t __offset)
29543 {
29544 return __arm_vldrwq_gather_shifted_offset_s32 (__base, __offset);
29545 }
29546
29547 __extension__ extern __inline uint32x4_t
29548 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29549 __arm_vldrwq_gather_shifted_offset (uint32_t const * __base, uint32x4_t __offset)
29550 {
29551 return __arm_vldrwq_gather_shifted_offset_u32 (__base, __offset);
29552 }
29553
/* Predicated forms.  */
29554 __extension__ extern __inline int32x4_t
29555 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29556 __arm_vldrwq_gather_shifted_offset_z (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29557 {
29558 return __arm_vldrwq_gather_shifted_offset_z_s32 (__base, __offset, __p);
29559 }
29560
29561 __extension__ extern __inline uint32x4_t
29562 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29563 __arm_vldrwq_gather_shifted_offset_z (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
29564 {
29565 return __arm_vldrwq_gather_shifted_offset_z_u32 (__base, __offset, __p);
29566 }
29567
/* Polymorphic overloads of vst1q: contiguous store of one vector to
   __addr; the element type of __value selects the implementation.  */
29568 __extension__ extern __inline void
29569 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29570 __arm_vst1q (int8_t * __addr, int8x16_t __value)
29571 {
29572 __arm_vst1q_s8 (__addr, __value);
29573 }
29574
29575 __extension__ extern __inline void
29576 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29577 __arm_vst1q (int32_t * __addr, int32x4_t __value)
29578 {
29579 __arm_vst1q_s32 (__addr, __value);
29580 }
29581
29582 __extension__ extern __inline void
29583 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29584 __arm_vst1q (int16_t * __addr, int16x8_t __value)
29585 {
29586 __arm_vst1q_s16 (__addr, __value);
29587 }
29588
29589 __extension__ extern __inline void
29590 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29591 __arm_vst1q (uint8_t * __addr, uint8x16_t __value)
29592 {
29593 __arm_vst1q_u8 (__addr, __value);
29594 }
29595
29596 __extension__ extern __inline void
29597 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29598 __arm_vst1q (uint32_t * __addr, uint32x4_t __value)
29599 {
29600 __arm_vst1q_u32 (__addr, __value);
29601 }
29602
29603 __extension__ extern __inline void
29604 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29605 __arm_vst1q (uint16_t * __addr, uint16x8_t __value)
29606 {
29607 __arm_vst1q_u16 (__addr, __value);
29608 }
29609
/* Polymorphic overloads of the vstrhq scatter stores: scatter the
   halfword-sized elements of __value to __base plus the per-lane
   offsets in __offset.  The _p variants store only the lanes enabled
   by predicate __p; the _shifted_offset variants use element-scaled
   offsets.  Each wrapper forwards to its type-suffixed form.  */
29610 __extension__ extern __inline void
29611 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29612 __arm_vstrhq_scatter_offset (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
29613 {
29614 __arm_vstrhq_scatter_offset_s32 (__base, __offset, __value);
29615 }
29616
29617 __extension__ extern __inline void
29618 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29619 __arm_vstrhq_scatter_offset (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
29620 {
29621 __arm_vstrhq_scatter_offset_s16 (__base, __offset, __value);
29622 }
29623
29624 __extension__ extern __inline void
29625 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29626 __arm_vstrhq_scatter_offset (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
29627 {
29628 __arm_vstrhq_scatter_offset_u32 (__base, __offset, __value);
29629 }
29630
29631 __extension__ extern __inline void
29632 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29633 __arm_vstrhq_scatter_offset (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
29634 {
29635 __arm_vstrhq_scatter_offset_u16 (__base, __offset, __value);
29636 }
29637
/* Predicated byte-offset scatter stores.  */
29638 __extension__ extern __inline void
29639 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29640 __arm_vstrhq_scatter_offset_p (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
29641 {
29642 __arm_vstrhq_scatter_offset_p_s32 (__base, __offset, __value, __p);
29643 }
29644
29645 __extension__ extern __inline void
29646 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29647 __arm_vstrhq_scatter_offset_p (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
29648 {
29649 __arm_vstrhq_scatter_offset_p_s16 (__base, __offset, __value, __p);
29650 }
29651
29652 __extension__ extern __inline void
29653 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29654 __arm_vstrhq_scatter_offset_p (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
29655 {
29656 __arm_vstrhq_scatter_offset_p_u32 (__base, __offset, __value, __p);
29657 }
29658
29659 __extension__ extern __inline void
29660 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29661 __arm_vstrhq_scatter_offset_p (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
29662 {
29663 __arm_vstrhq_scatter_offset_p_u16 (__base, __offset, __value, __p);
29664 }
29665
/* Element-scaled (shifted) offset scatter stores.  */
29666 __extension__ extern __inline void
29667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29668 __arm_vstrhq_scatter_shifted_offset (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
29669 {
29670 __arm_vstrhq_scatter_shifted_offset_s32 (__base, __offset, __value);
29671 }
29672
29673 __extension__ extern __inline void
29674 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29675 __arm_vstrhq_scatter_shifted_offset (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
29676 {
29677 __arm_vstrhq_scatter_shifted_offset_s16 (__base, __offset, __value);
29678 }
29679
29680 __extension__ extern __inline void
29681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29682 __arm_vstrhq_scatter_shifted_offset (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
29683 {
29684 __arm_vstrhq_scatter_shifted_offset_u32 (__base, __offset, __value);
29685 }
29686
29687 __extension__ extern __inline void
29688 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29689 __arm_vstrhq_scatter_shifted_offset (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
29690 {
29691 __arm_vstrhq_scatter_shifted_offset_u16 (__base, __offset, __value);
29692 }
29693
/* Predicated element-scaled scatter stores.  */
29694 __extension__ extern __inline void
29695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29696 __arm_vstrhq_scatter_shifted_offset_p (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
29697 {
29698 __arm_vstrhq_scatter_shifted_offset_p_s32 (__base, __offset, __value, __p);
29699 }
29700
29701 __extension__ extern __inline void
29702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29703 __arm_vstrhq_scatter_shifted_offset_p (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
29704 {
29705 __arm_vstrhq_scatter_shifted_offset_p_s16 (__base, __offset, __value, __p);
29706 }
29707
29708 __extension__ extern __inline void
29709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29710 __arm_vstrhq_scatter_shifted_offset_p (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
29711 {
29712 __arm_vstrhq_scatter_shifted_offset_p_u32 (__base, __offset, __value, __p);
29713 }
29714
29715 __extension__ extern __inline void
29716 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29717 __arm_vstrhq_scatter_shifted_offset_p (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
29718 {
29719 __arm_vstrhq_scatter_shifted_offset_p_u16 (__base, __offset, __value, __p);
29720 }
29721
/* Polymorphic overloads of the contiguous narrowing/full-width stores
   vstrhq (halfword) and vstrwq (word), plus their predicated _p
   variants; each wrapper forwards to its type-suffixed form.  */
29722 __extension__ extern __inline void
29723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29724 __arm_vstrhq (int16_t * __addr, int32x4_t __value)
29725 {
29726 __arm_vstrhq_s32 (__addr, __value);
29727 }
29728
29729 __extension__ extern __inline void
29730 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29731 __arm_vstrhq (int16_t * __addr, int16x8_t __value)
29732 {
29733 __arm_vstrhq_s16 (__addr, __value);
29734 }
29735
29736 __extension__ extern __inline void
29737 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29738 __arm_vstrhq (uint16_t * __addr, uint32x4_t __value)
29739 {
29740 __arm_vstrhq_u32 (__addr, __value);
29741 }
29742
29743 __extension__ extern __inline void
29744 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29745 __arm_vstrhq (uint16_t * __addr, uint16x8_t __value)
29746 {
29747 __arm_vstrhq_u16 (__addr, __value);
29748 }
29749
/* Predicated halfword stores.  */
29750 __extension__ extern __inline void
29751 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29752 __arm_vstrhq_p (int16_t * __addr, int32x4_t __value, mve_pred16_t __p)
29753 {
29754 __arm_vstrhq_p_s32 (__addr, __value, __p);
29755 }
29756
29757 __extension__ extern __inline void
29758 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29759 __arm_vstrhq_p (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
29760 {
29761 __arm_vstrhq_p_s16 (__addr, __value, __p);
29762 }
29763
29764 __extension__ extern __inline void
29765 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29766 __arm_vstrhq_p (uint16_t * __addr, uint32x4_t __value, mve_pred16_t __p)
29767 {
29768 __arm_vstrhq_p_u32 (__addr, __value, __p);
29769 }
29770
29771 __extension__ extern __inline void
29772 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29773 __arm_vstrhq_p (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
29774 {
29775 __arm_vstrhq_p_u16 (__addr, __value, __p);
29776 }
29777
/* Word stores.  */
29778 __extension__ extern __inline void
29779 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29780 __arm_vstrwq (int32_t * __addr, int32x4_t __value)
29781 {
29782 __arm_vstrwq_s32 (__addr, __value);
29783 }
29784
29785 __extension__ extern __inline void
29786 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29787 __arm_vstrwq (uint32_t * __addr, uint32x4_t __value)
29788 {
29789 __arm_vstrwq_u32 (__addr, __value);
29790 }
29791
/* Predicated word stores.  */
29792 __extension__ extern __inline void
29793 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29794 __arm_vstrwq_p (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
29795 {
29796 __arm_vstrwq_p_s32 (__addr, __value, __p);
29797 }
29798
29799 __extension__ extern __inline void
29800 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29801 __arm_vstrwq_p (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
29802 {
29803 __arm_vstrwq_p_u32 (__addr, __value, __p);
29804 }
29805
/* Polymorphic overloads of the vstrdq doubleword scatter stores:
   _base forms scatter to per-lane base addresses in __addr plus an
   immediate __offset; _offset/_shifted_offset forms scatter to
   __base plus per-lane vector offsets (byte / element-scaled).
   The _p variants store only lanes enabled by predicate __p.  */
29806 __extension__ extern __inline void
29807 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29808 __arm_vstrdq_scatter_base_p (uint64x2_t __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
29809 {
29810 __arm_vstrdq_scatter_base_p_s64 (__addr, __offset, __value, __p);
29811 }
29812
29813 __extension__ extern __inline void
29814 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29815 __arm_vstrdq_scatter_base_p (uint64x2_t __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
29816 {
29817 __arm_vstrdq_scatter_base_p_u64 (__addr, __offset, __value, __p);
29818 }
29819
29820 __extension__ extern __inline void
29821 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29822 __arm_vstrdq_scatter_base (uint64x2_t __addr, const int __offset, int64x2_t __value)
29823 {
29824 __arm_vstrdq_scatter_base_s64 (__addr, __offset, __value);
29825 }
29826
29827 __extension__ extern __inline void
29828 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29829 __arm_vstrdq_scatter_base (uint64x2_t __addr, const int __offset, uint64x2_t __value)
29830 {
29831 __arm_vstrdq_scatter_base_u64 (__addr, __offset, __value);
29832 }
29833
/* Byte-offset scatter stores.  */
29834 __extension__ extern __inline void
29835 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29836 __arm_vstrdq_scatter_offset_p (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p)
29837 {
29838 __arm_vstrdq_scatter_offset_p_s64 (__base, __offset, __value, __p);
29839 }
29840
29841 __extension__ extern __inline void
29842 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29843 __arm_vstrdq_scatter_offset_p (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p)
29844 {
29845 __arm_vstrdq_scatter_offset_p_u64 (__base, __offset, __value, __p);
29846 }
29847
29848 __extension__ extern __inline void
29849 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29850 __arm_vstrdq_scatter_offset (int64_t * __base, uint64x2_t __offset, int64x2_t __value)
29851 {
29852 __arm_vstrdq_scatter_offset_s64 (__base, __offset, __value);
29853 }
29854
29855 __extension__ extern __inline void
29856 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29857 __arm_vstrdq_scatter_offset (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value)
29858 {
29859 __arm_vstrdq_scatter_offset_u64 (__base, __offset, __value);
29860 }
29861
/* Element-scaled (shifted) offset scatter stores.  */
29862 __extension__ extern __inline void
29863 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29864 __arm_vstrdq_scatter_shifted_offset_p (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p)
29865 {
29866 __arm_vstrdq_scatter_shifted_offset_p_s64 (__base, __offset, __value, __p);
29867 }
29868
29869 __extension__ extern __inline void
29870 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29871 __arm_vstrdq_scatter_shifted_offset_p (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p)
29872 {
29873 __arm_vstrdq_scatter_shifted_offset_p_u64 (__base, __offset, __value, __p);
29874 }
29875
29876 __extension__ extern __inline void
29877 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29878 __arm_vstrdq_scatter_shifted_offset (int64_t * __base, uint64x2_t __offset, int64x2_t __value)
29879 {
29880 __arm_vstrdq_scatter_shifted_offset_s64 (__base, __offset, __value);
29881 }
29882
29883 __extension__ extern __inline void
29884 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29885 __arm_vstrdq_scatter_shifted_offset (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value)
29886 {
29887 __arm_vstrdq_scatter_shifted_offset_u64 (__base, __offset, __value);
29888 }
29889
/* Polymorphic overloads of the vstrwq word scatter stores: scatter
   __value's lanes to __base plus per-lane offsets (byte offsets for
   _offset, element-scaled for _shifted_offset); _p variants store
   only the lanes enabled by predicate __p.  */
29890 __extension__ extern __inline void
29891 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29892 __arm_vstrwq_scatter_offset_p (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
29893 {
29894 __arm_vstrwq_scatter_offset_p_s32 (__base, __offset, __value, __p);
29895 }
29896
29897 __extension__ extern __inline void
29898 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29899 __arm_vstrwq_scatter_offset_p (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
29900 {
29901 __arm_vstrwq_scatter_offset_p_u32 (__base, __offset, __value, __p);
29902 }
29903
29904 __extension__ extern __inline void
29905 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29906 __arm_vstrwq_scatter_offset (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
29907 {
29908 __arm_vstrwq_scatter_offset_s32 (__base, __offset, __value);
29909 }
29910
29911 __extension__ extern __inline void
29912 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29913 __arm_vstrwq_scatter_offset (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
29914 {
29915 __arm_vstrwq_scatter_offset_u32 (__base, __offset, __value);
29916 }
29917
/* Element-scaled (shifted) offset scatter stores.  */
29918 __extension__ extern __inline void
29919 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29920 __arm_vstrwq_scatter_shifted_offset_p (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
29921 {
29922 __arm_vstrwq_scatter_shifted_offset_p_s32 (__base, __offset, __value, __p);
29923 }
29924
29925 __extension__ extern __inline void
29926 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29927 __arm_vstrwq_scatter_shifted_offset_p (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
29928 {
29929 __arm_vstrwq_scatter_shifted_offset_p_u32 (__base, __offset, __value, __p);
29930 }
29931
29932 __extension__ extern __inline void
29933 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29934 __arm_vstrwq_scatter_shifted_offset (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
29935 {
29936 __arm_vstrwq_scatter_shifted_offset_s32 (__base, __offset, __value);
29937 }
29938
29939 __extension__ extern __inline void
29940 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29941 __arm_vstrwq_scatter_shifted_offset (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
29942 {
29943 __arm_vstrwq_scatter_shifted_offset_u32 (__base, __offset, __value);
29944 }
29945
/* Polymorphic overloads of vaddq: lane-wise vector addition; the
   common operand type of __a and __b selects the implementation.  */
29946 __extension__ extern __inline int8x16_t
29947 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29948 __arm_vaddq (int8x16_t __a, int8x16_t __b)
29949 {
29950 return __arm_vaddq_s8 (__a, __b);
29951 }
29952
29953 __extension__ extern __inline int16x8_t
29954 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29955 __arm_vaddq (int16x8_t __a, int16x8_t __b)
29956 {
29957 return __arm_vaddq_s16 (__a, __b);
29958 }
29959
29960 __extension__ extern __inline int32x4_t
29961 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29962 __arm_vaddq (int32x4_t __a, int32x4_t __b)
29963 {
29964 return __arm_vaddq_s32 (__a, __b);
29965 }
29966
29967 __extension__ extern __inline uint8x16_t
29968 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29969 __arm_vaddq (uint8x16_t __a, uint8x16_t __b)
29970 {
29971 return __arm_vaddq_u8 (__a, __b);
29972 }
29973
29974 __extension__ extern __inline uint16x8_t
29975 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29976 __arm_vaddq (uint16x8_t __a, uint16x8_t __b)
29977 {
29978 return __arm_vaddq_u16 (__a, __b);
29979 }
29980
29981 __extension__ extern __inline uint32x4_t
29982 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29983 __arm_vaddq (uint32x4_t __a, uint32x4_t __b)
29984 {
29985 return __arm_vaddq_u32 (__a, __b);
29986 }
29987
/* Polymorphic overloads of the decrementing-duplicate family vddupq.
   _m forms are predicated, merging into __inactive for disabled
   lanes; a `uint32_t __a' argument selects the immediate-start (_n)
   form, while a `uint32_t *' selects the write-back (_wb) form that
   updates the start value through the pointer.  */
29988 __extension__ extern __inline uint8x16_t
29989 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29990 __arm_vddupq_m (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
29991 {
29992 return __arm_vddupq_m_n_u8 (__inactive, __a, __imm, __p);
29993 }
29994
29995 __extension__ extern __inline uint32x4_t
29996 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29997 __arm_vddupq_m (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
29998 {
29999 return __arm_vddupq_m_n_u32 (__inactive, __a, __imm, __p);
30000 }
30001
30002 __extension__ extern __inline uint16x8_t
30003 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30004 __arm_vddupq_m (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
30005 {
30006 return __arm_vddupq_m_n_u16 (__inactive, __a, __imm, __p);
30007 }
30008
/* Predicated write-back forms.  */
30009 __extension__ extern __inline uint8x16_t
30010 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30011 __arm_vddupq_m (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
30012 {
30013 return __arm_vddupq_m_wb_u8 (__inactive, __a, __imm, __p);
30014 }
30015
30016 __extension__ extern __inline uint16x8_t
30017 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30018 __arm_vddupq_m (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
30019 {
30020 return __arm_vddupq_m_wb_u16 (__inactive, __a, __imm, __p);
30021 }
30022
30023 __extension__ extern __inline uint32x4_t
30024 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30025 __arm_vddupq_m (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
30026 {
30027 return __arm_vddupq_m_wb_u32 (__inactive, __a, __imm, __p);
30028 }
30029
/* Unpredicated immediate-start forms (element width in the name).  */
30030 __extension__ extern __inline uint8x16_t
30031 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30032 __arm_vddupq_u8 (uint32_t __a, const int __imm)
30033 {
30034 return __arm_vddupq_n_u8 (__a, __imm);
30035 }
30036
30037 __extension__ extern __inline uint32x4_t
30038 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30039 __arm_vddupq_u32 (uint32_t __a, const int __imm)
30040 {
30041 return __arm_vddupq_n_u32 (__a, __imm);
30042 }
30043
30044 __extension__ extern __inline uint16x8_t
30045 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30046 __arm_vddupq_u16 (uint32_t __a, const int __imm)
30047 {
30048 return __arm_vddupq_n_u16 (__a, __imm);
30049 }
30050
/* Polymorphic overloads of the decrementing-wrapping-duplicate
   family vdwdupq (__b is the wrap value).  _m forms are predicated,
   merging into __inactive; a `uint32_t __a' start selects the
   immediate (_n) form and a `uint32_t *' start selects the
   write-back (_wb) form.  */
30051 __extension__ extern __inline uint8x16_t
30052 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30053 __arm_vdwdupq_m (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
30054 {
30055 return __arm_vdwdupq_m_n_u8 (__inactive, __a, __b, __imm, __p);
30056 }
30057
30058 __extension__ extern __inline uint32x4_t
30059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30060 __arm_vdwdupq_m (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
30061 {
30062 return __arm_vdwdupq_m_n_u32 (__inactive, __a, __b, __imm, __p);
30063 }
30064
30065 __extension__ extern __inline uint16x8_t
30066 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30067 __arm_vdwdupq_m (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
30068 {
30069 return __arm_vdwdupq_m_n_u16 (__inactive, __a, __b, __imm, __p);
30070 }
30071
/* Predicated write-back forms.  */
30072 __extension__ extern __inline uint8x16_t
30073 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30074 __arm_vdwdupq_m (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
30075 {
30076 return __arm_vdwdupq_m_wb_u8 (__inactive, __a, __b, __imm, __p);
30077 }
30078
30079 __extension__ extern __inline uint32x4_t
30080 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30081 __arm_vdwdupq_m (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
30082 {
30083 return __arm_vdwdupq_m_wb_u32 (__inactive, __a, __b, __imm, __p);
30084 }
30085
30086 __extension__ extern __inline uint16x8_t
30087 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30088 __arm_vdwdupq_m (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
30089 {
30090 return __arm_vdwdupq_m_wb_u16 (__inactive, __a, __b, __imm, __p);
30091 }
30092
/* Unpredicated immediate-start forms.  */
30093 __extension__ extern __inline uint8x16_t
30094 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30095 __arm_vdwdupq_u8 (uint32_t __a, uint32_t __b, const int __imm)
30096 {
30097 return __arm_vdwdupq_n_u8 (__a, __b, __imm);
30098 }
30099
30100 __extension__ extern __inline uint32x4_t
30101 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30102 __arm_vdwdupq_u32 (uint32_t __a, uint32_t __b, const int __imm)
30103 {
30104 return __arm_vdwdupq_n_u32 (__a, __b, __imm);
30105 }
30106
30107 __extension__ extern __inline uint16x8_t
30108 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30109 __arm_vdwdupq_u16 (uint32_t __a, uint32_t __b, const int __imm)
30110 {
30111 return __arm_vdwdupq_n_u16 (__a, __b, __imm);
30112 }
30113
/* Unpredicated write-back forms.  */
30114 __extension__ extern __inline uint8x16_t
30115 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30116 __arm_vdwdupq_u8 (uint32_t * __a, uint32_t __b, const int __imm)
30117 {
30118 return __arm_vdwdupq_wb_u8 (__a, __b, __imm);
30119 }
30120
30121 __extension__ extern __inline uint32x4_t
30122 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30123 __arm_vdwdupq_u32 (uint32_t * __a, uint32_t __b, const int __imm)
30124 {
30125 return __arm_vdwdupq_wb_u32 (__a, __b, __imm);
30126 }
30127
30128 __extension__ extern __inline uint16x8_t
30129 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30130 __arm_vdwdupq_u16 (uint32_t * __a, uint32_t __b, const int __imm)
30131 {
30132 return __arm_vdwdupq_wb_u16 (__a, __b, __imm);
30133 }
30134
/* Polymorphic overloads of the incrementing-duplicate family vidupq
   (and the write-back forms of vddupq at the end of this group).
   _m forms are predicated, merging into __inactive; a `uint32_t'
   start argument selects the immediate (_n) form and a `uint32_t *'
   selects the write-back (_wb) form.  Note the n-form and wb-form
   overloads are interleaved here, as emitted by the generator.  */
30135 __extension__ extern __inline uint8x16_t
30136 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30137 __arm_vidupq_m (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
30138 {
30139 return __arm_vidupq_m_n_u8 (__inactive, __a, __imm, __p);
30140 }
30141
30142 __extension__ extern __inline uint32x4_t
30143 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30144 __arm_vidupq_m (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
30145 {
30146 return __arm_vidupq_m_n_u32 (__inactive, __a, __imm, __p);
30147 }
30148
30149 __extension__ extern __inline uint16x8_t
30150 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30151 __arm_vidupq_m (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
30152 {
30153 return __arm_vidupq_m_n_u16 (__inactive, __a, __imm, __p);
30154 }
30155
30156 __extension__ extern __inline uint8x16_t
30157 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30158 __arm_vidupq_u8 (uint32_t __a, const int __imm)
30159 {
30160 return __arm_vidupq_n_u8 (__a, __imm);
30161 }
30162
/* Predicated write-back forms.  */
30163 __extension__ extern __inline uint8x16_t
30164 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30165 __arm_vidupq_m (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
30166 {
30167 return __arm_vidupq_m_wb_u8 (__inactive, __a, __imm, __p);
30168 }
30169
30170 __extension__ extern __inline uint16x8_t
30171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30172 __arm_vidupq_m (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
30173 {
30174 return __arm_vidupq_m_wb_u16 (__inactive, __a, __imm, __p);
30175 }
30176
30177 __extension__ extern __inline uint32x4_t
30178 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30179 __arm_vidupq_m (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
30180 {
30181 return __arm_vidupq_m_wb_u32 (__inactive, __a, __imm, __p);
30182 }
30183
/* Remaining unpredicated immediate-start forms.  */
30184 __extension__ extern __inline uint32x4_t
30185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30186 __arm_vidupq_u32 (uint32_t __a, const int __imm)
30187 {
30188 return __arm_vidupq_n_u32 (__a, __imm);
30189 }
30190
30191 __extension__ extern __inline uint16x8_t
30192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30193 __arm_vidupq_u16 (uint32_t __a, const int __imm)
30194 {
30195 return __arm_vidupq_n_u16 (__a, __imm);
30196 }
30197
/* Unpredicated write-back forms.  */
30198 __extension__ extern __inline uint8x16_t
30199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30200 __arm_vidupq_u8 (uint32_t * __a, const int __imm)
30201 {
30202 return __arm_vidupq_wb_u8 (__a, __imm);
30203 }
30204
30205 __extension__ extern __inline uint16x8_t
30206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30207 __arm_vidupq_u16 (uint32_t * __a, const int __imm)
30208 {
30209 return __arm_vidupq_wb_u16 (__a, __imm);
30210 }
30211
30212 __extension__ extern __inline uint32x4_t
30213 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30214 __arm_vidupq_u32 (uint32_t * __a, const int __imm)
30215 {
30216 return __arm_vidupq_wb_u32 (__a, __imm);
30217 }
30218
/* Unpredicated write-back forms of vddupq.  */
30219 __extension__ extern __inline uint8x16_t
30220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30221 __arm_vddupq_u8 (uint32_t * __a, const int __imm)
30222 {
30223 return __arm_vddupq_wb_u8 (__a, __imm);
30224 }
30225
30226 __extension__ extern __inline uint16x8_t
30227 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30228 __arm_vddupq_u16 (uint32_t * __a, const int __imm)
30229 {
30230 return __arm_vddupq_wb_u16 (__a, __imm);
30231 }
30232
30233 __extension__ extern __inline uint32x4_t
30234 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30235 __arm_vddupq_u32 (uint32_t * __a, const int __imm)
30236 {
30237 return __arm_vddupq_wb_u32 (__a, __imm);
30238 }
30239
/* Predicated (_m, merging with __inactive) polymorphic overloads of
   viwdupq (VIWDUP: incrementing-wrapping-dup).  Overloads taking a
   uint32_t start value forward to the _n variants; overloads taking a
   uint32_t * start value forward to the _wb (writeback) variants.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_n_u8 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_n_u32 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_n_u16 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_wb_u8 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_wb_u32 (__inactive, __a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_m (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_m_wb_u16 (__inactive, __a, __b, __imm, __p);
}
30281
/* Non-predicated viwdupq overloads: a uint32_t start value selects
   the _n variant, a uint32_t * start value selects the _wb
   (writeback) variant; each forwards unchanged.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u8 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_n_u8 (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u32 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_n_u32 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u16 (uint32_t __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_n_u16 (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u8 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_wb_u8 (__a, __b, __imm);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u32 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_wb_u32 (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_u16 (uint32_t * __a, uint32_t __b, const int __imm)
{
  return __arm_viwdupq_wb_u16 (__a, __b, __imm);
}
30323
/* Polymorphic 64-bit scatter-store with base writeback (VSTRD):
   dispatches on the element type of __value to the _s64/_u64
   intrinsic; the _p forms additionally take a predicate.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb (uint64x2_t * __addr, const int __offset, int64x2_t __value)
{
  __arm_vstrdq_scatter_base_wb_s64 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb (uint64x2_t * __addr, const int __offset, uint64x2_t __value)
{
  __arm_vstrdq_scatter_base_wb_u64 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb_p (uint64x2_t * __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_base_wb_p_s64 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrdq_scatter_base_wb_p (uint64x2_t * __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
{
  __arm_vstrdq_scatter_base_wb_p_u64 (__addr, __offset, __value, __p);
}
30351
/* Polymorphic 32-bit scatter-store with base writeback (VSTRW):
   dispatches on the element type of __value to the _s32/_u32
   intrinsic; the _p forms additionally take a predicate.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_base_wb_p_s32 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vstrwq_scatter_base_wb_p_u32 (__addr, __offset, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, int32x4_t __value)
{
  __arm_vstrwq_scatter_base_wb_s32 (__addr, __offset, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, uint32x4_t __value)
{
  __arm_vstrwq_scatter_base_wb_u32 (__addr, __offset, __value);
}
30379
/* Predicated _x overloads of vddupq: value start -> _x_n variant,
   pointer start -> _x_wb (writeback) variant; each forwards the
   arguments unchanged to the type-suffixed intrinsic.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_n_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_n_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_n_u32 (__a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_wb_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_wb_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vddupq_x_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vddupq_x_wb_u32 (__a, __imm, __p);
}
30421
/* Predicated _x overloads of vdwdupq (decrementing-wrapping-dup):
   value start -> _x_n variant, pointer start -> _x_wb (writeback)
   variant.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_n_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_n_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_wb_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_wb_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdwdupq_x_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vdwdupq_x_wb_u32 (__a, __b, __imm, __p);
}
30463
/* Predicated _x overloads of vidupq: value start -> _x_n variant,
   pointer start -> _x_wb (writeback) variant.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_n_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_n_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_n_u32 (__a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_wb_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_wb_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vidupq_x_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
{
  return __arm_vidupq_x_wb_u32 (__a, __imm, __p);
}
30505
/* Predicated _x overloads of viwdupq: value start -> _x_n variant,
   pointer start -> _x_wb (writeback) variant.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_n_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_n_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_n_u32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_wb_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_wb_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_viwdupq_x_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
{
  return __arm_viwdupq_x_wb_u32 (__a, __b, __imm, __p);
}
30547
/* Predicated _x overloads of vminq (element-wise minimum): dispatch
   on the vector element type to the signed/unsigned intrinsic.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vminq_x_u32 (__a, __b, __p);
}
30589
/* Predicated _x overloads of vmaxq (element-wise maximum): dispatch
   on the vector element type to the signed/unsigned intrinsic.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmaxq_x_u32 (__a, __b, __p);
}
30631
/* Predicated _x overloads of vabdq (absolute difference): dispatch
   on the vector element type to the signed/unsigned intrinsic.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vabdq_x_u32 (__a, __b, __p);
}
30673
/* Predicated _x overloads of vabsq (absolute value) — signed
   element types only, as absolute value of unsigned is identity.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_x_s8 (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_x_s16 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_x (int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_x_s32 (__a, __p);
}
30694
/* Predicated _x overloads of vaddq (element-wise add): vector+vector
   forms forward to the plain intrinsic, vector+scalar forms to the
   _n intrinsic, dispatching on element type and signedness.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_n_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_n_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_n_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_u32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_n_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_n_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vaddq_x_n_u32 (__a, __b, __p);
}
30778
/* Predicated _x overloads of vclsq (count leading sign bits) —
   signed element types only.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vclsq_x_s8 (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vclsq_x_s16 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclsq_x (int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vclsq_x_s32 (__a, __p);
}
30799
/* Predicated _x overloads of vclzq (count leading zeros): dispatch
   on the vector element type to the signed/unsigned intrinsic.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_x_s8 (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_x_s16 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x (int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_x_s32 (__a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x (uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_x_u8 (__a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x (uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_x_u16 (__a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vclzq_x (uint32x4_t __a, mve_pred16_t __p)
{
  return __arm_vclzq_x_u32 (__a, __p);
}
30841
/* Predicated _x overloads of vnegq (negate) — signed element types
   only.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vnegq_x_s8 (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vnegq_x_s16 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_x (int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vnegq_x_s32 (__a, __p);
}
30862
/* Predicated _x overloads of vmulhq (multiply returning high half):
   dispatch on the vector element type to the signed/unsigned
   intrinsic.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulhq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulhq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmulhq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulhq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulhq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulhq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmulhq_x_u32 (__a, __b, __p);
}
30904
/* Predicated _x overloads of vmullbq_poly (polynomial multiply-long,
   bottom halves): p8 inputs widen to a 16-bit result, p16 to 32-bit.
   Polynomial types are carried in unsigned vectors here.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_poly_x_p8 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_poly_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_poly_x_p16 (__a, __b, __p);
}
30918
/* Predicated _x overloads of vmullbq_int (integer multiply-long,
   bottom halves): each returns a vector of elements twice the input
   width, dispatching on element type and signedness.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_int_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_int_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_int_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_int_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_int_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmullbq_int_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmullbq_int_x_u32 (__a, __b, __p);
}
30960
/* Predicated _x overloads of vmulltq_poly (polynomial multiply-long,
   top halves): p8 inputs widen to a 16-bit result, p16 to 32-bit.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_poly_x_p8 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_poly_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_poly_x_p16 (__a, __b, __p);
}
30974
/* Predicated _x overloads of vmulltq_int (integer multiply-long,
   top halves): each returns a vector of elements twice the input
   width, dispatching on element type and signedness.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_int_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_int_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_int_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_int_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_int_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulltq_int_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmulltq_int_x_u32 (__a, __b, __p);
}
31016
/* Predicated _x overloads of vmulq (element-wise multiply):
   vector*vector forms forward to the plain intrinsic, vector*scalar
   forms to the _n intrinsic, dispatching on element type and
   signedness.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_n_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_n_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_n_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_u32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_n_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_n_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vmulq_x_n_u32 (__a, __b, __p);
}
31100
/* Overload dispatch stubs for the predicated vsubq_x intrinsic.
   Vector-by-vector overloads forward to the plain type-suffixed
   forms; vector-by-scalar overloads forward to the _n_ forms.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_s32 (__a, __b, __p);
}

/* Vector-by-scalar (_n_) signed forms.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_n_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_n_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_n_s32 (__a, __b, __p);
}

/* Vector-by-vector unsigned forms.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_u32 (__a, __b, __p);
}

/* Vector-by-scalar (_n_) unsigned forms.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_n_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_n_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vsubq_x_n_u32 (__a, __b, __p);
}
31184
/* Overload dispatch stubs for the predicated vcaddq_rot90_x and
   vcaddq_rot270_x intrinsics.  Each wrapper forwards to the
   type-suffixed implementation matching its argument types.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot90_x_u32 (__a, __b, __p);
}

/* 270-degree rotation variants.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcaddq_rot270_x_u32 (__a, __b, __p);
}
31268
/* Overload dispatch stubs for the predicated vhaddq_x intrinsic.
   Vector-by-scalar overloads forward to the _n_ forms; the
   vector-by-vector overloads follow below.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_x_n_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_x_n_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_x_n_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_x_n_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_x_n_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_x_n_u32 (__a, __b, __p);
}

/* Vector-by-vector forms.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vhaddq_x_u32 (__a, __b, __p);
}
31352
/* Overload dispatch stubs for the predicated vhcaddq_rot90_x and
   vhcaddq_rot270_x intrinsics (signed element types only).  Each
   wrapper forwards to the matching type-suffixed implementation.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vhcaddq_rot90_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vhcaddq_rot90_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot90_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vhcaddq_rot90_x_s32 (__a, __b, __p);
}

/* 270-degree rotation variants.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vhcaddq_rot270_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vhcaddq_rot270_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhcaddq_rot270_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vhcaddq_rot270_x_s32 (__a, __b, __p);
}
31394
/* Overload dispatch stubs for the predicated vhsubq_x intrinsic.
   Vector-by-scalar overloads forward to the _n_ forms; the
   vector-by-vector overloads follow below.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_x_n_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_x_n_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_x_n_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_x_n_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_x_n_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_x_n_u32 (__a, __b, __p);
}

/* Vector-by-vector forms.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vhsubq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vhsubq_x_u32 (__a, __b, __p);
}
31478
/* Overload dispatch stubs for the predicated vrhaddq_x and vrmulhq_x
   intrinsics.  Each wrapper forwards to the type-suffixed
   implementation matching its argument types.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrhaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrhaddq_x_u32 (__a, __b, __p);
}

/* vrmulhq_x overloads.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmulhq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrmulhq_x_u32 (__a, __b, __p);
}
31562
/* Overload dispatch stubs for the predicated bitwise vandq_x and
   vbicq_x intrinsics.  Each wrapper forwards to the type-suffixed
   implementation matching its argument types.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vandq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vandq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vandq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vandq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vandq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vandq_x_u32 (__a, __b, __p);
}

/* vbicq_x overloads.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_x_u32 (__a, __b, __p);
}
31646
/* Overload dispatch stubs for the predicated vbrsrq_x intrinsic.
   The second operand is always a scalar int32_t, so every overload
   forwards to a _n_ form; dispatch is on the vector argument type.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x (int8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_x_n_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x (int16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_x_n_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_x_n_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_x_n_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_x_n_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_x_n_u32 (__a, __b, __p);
}
31688
/* Overload dispatch stubs for the predicated bitwise veorq_x
   intrinsic.  Each wrapper forwards to the type-suffixed
   implementation matching its argument types.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_veorq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_veorq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_veorq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_veorq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_veorq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_veorq_x_u32 (__a, __b, __p);
}
31730
/* Overload dispatch stubs for the predicated widening moves
   vmovlbq_x (bottom half) and vmovltq_x (top half).  Each overload
   takes a narrow vector and returns the next wider element type, as
   the wrapped type-suffixed intrinsics' signatures show.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vmovlbq_x_s8 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vmovlbq_x_s16 (__a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x (uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vmovlbq_x_u8 (__a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovlbq_x (uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vmovlbq_x_u16 (__a, __p);
}

/* Top-half variants.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vmovltq_x_s8 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vmovltq_x_s16 (__a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x (uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vmovltq_x_u8 (__a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmovltq_x (uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vmovltq_x_u16 (__a, __p);
}
31786
/* Overload dispatch stubs for the predicated bitwise-complement
   vmvnq_x intrinsic.  Each wrapper forwards to the type-suffixed
   implementation matching its argument type.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_x_s8 (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_x_s16 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x (int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_x_s32 (__a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x (uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_x_u8 (__a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x (uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_x_u16 (__a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmvnq_x (uint32x4_t __a, mve_pred16_t __p)
{
  return __arm_vmvnq_x_u32 (__a, __p);
}
31828
/* Overload dispatch stubs for the predicated bitwise vornq_x and
   vorrq_x intrinsics.  Each wrapper forwards to the type-suffixed
   implementation matching its argument types.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vornq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vornq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vornq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vornq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vornq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vornq_x_u32 (__a, __b, __p);
}

/* vorrq_x overloads.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vorrq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vorrq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vorrq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
{
  return __arm_vorrq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
{
  return __arm_vorrq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
{
  return __arm_vorrq_x_u32 (__a, __b, __p);
}
31912
/* Overload dispatch stubs for the predicated element-reversal
   intrinsics vrev16q_x, vrev32q_x and vrev64q_x.  The set of element
   types narrows as the reversal container shrinks: vrev16q_x exists
   only for 8-bit elements, vrev32q_x for 8/16-bit, vrev64q_x for
   8/16/32-bit, exactly as the overloads below enumerate.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev16q_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vrev16q_x_s8 (__a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev16q_x (uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vrev16q_x_u8 (__a, __p);
}

/* vrev32q_x overloads.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vrev32q_x_s8 (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrev32q_x_s16 (__a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x (uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vrev32q_x_u8 (__a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x (uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrev32q_x_u16 (__a, __p);
}

/* vrev64q_x overloads.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x (int8x16_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_x_s8 (__a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x (int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_x_s16 (__a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x (int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_x_s32 (__a, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x (uint8x16_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_x_u8 (__a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x (uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_x_u16 (__a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x (uint32x4_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_x_u32 (__a, __p);
}
31996
/* Overloads of the polymorphic __arm_vrshlq_x.  Note that the unsigned
   variants take a signed shift vector __b, matching the underlying
   __arm_vrshlq_x_u* intrinsic signatures.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshlq_x (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vrshlq_x_u32 (__a, __b, __p);
}
32038
/* Overloads of the widening long-shift operations __arm_vshllbq_x and
   __arm_vshlltq_x; each returns the double-width vector type of its
   input and forwards to the _n_<type> intrinsic.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshllbq_x_n_s8 (__a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshllbq_x_n_s16 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshllbq_x_n_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshllbq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshllbq_x_n_u16 (__a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlltq_x_n_s8 (__a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlltq_x_n_s16 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlltq_x_n_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlltq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlltq_x_n_u16 (__a, __imm, __p);
}
32094
/* Overloads of __arm_vshlq_x (vector shift amount; unsigned variants
   take a signed __b) and __arm_vshlq_x_n (immediate shift amount);
   each forwards to the type-suffixed intrinsic.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_x_s8 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_x_s16 (__a, __b, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_x_s32 (__a, __b, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_x_u8 (__a, __b, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_x_u16 (__a, __b, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
{
  return __arm_vshlq_x_u32 (__a, __b, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_x_n_s8 (__a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_x_n_s16 (__a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_x_n_s32 (__a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_x_n_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_x_n_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlq_x_n (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlq_x_n_u32 (__a, __imm, __p);
}
32178
/* Overloads of the immediate right-shift operations __arm_vrshrq_x and
   __arm_vshrq_x; each forwards to the _n_<type> intrinsic matching the
   vector argument type.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_x_n_s8 (__a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_x_n_s16 (__a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_x_n_s32 (__a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_x_n_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_x_n_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrshrq_x (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vrshrq_x_n_u32 (__a, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_x_n_s8 (__a, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_x_n_s16 (__a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_x_n_s32 (__a, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_x_n_u8 (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_x_n_u16 (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshrq_x (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __arm_vshrq_x_n_u32 (__a, __imm, __p);
}
32262
/* Overloads of the 32-bit add-with-carry operations.  __carry_out /
   __carry are written through the pointer by the underlying intrinsic;
   the _m forms additionally take an __inactive vector and predicate.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadciq (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
{
  return __arm_vadciq_s32 (__a, __b, __carry_out);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadciq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
{
  return __arm_vadciq_u32 (__a, __b, __carry_out);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadciq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
{
  return __arm_vadciq_m_s32 (__inactive, __a, __b, __carry_out, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadciq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
{
  return __arm_vadciq_m_u32 (__inactive, __a, __b, __carry_out, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadcq (int32x4_t __a, int32x4_t __b, unsigned * __carry)
{
  return __arm_vadcq_s32 (__a, __b, __carry);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadcq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
{
  return __arm_vadcq_u32 (__a, __b, __carry);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadcq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
{
  return __arm_vadcq_m_s32 (__inactive, __a, __b, __carry, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vadcq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
{
  return __arm_vadcq_m_u32 (__inactive, __a, __b, __carry, __p);
}
32318
/* Overloads of the 32-bit subtract-with-carry operations, mirroring the
   vadcq family: carry state is exchanged through the pointer argument,
   and the _m forms are predicated with an __inactive fallback vector.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbciq (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
{
  return __arm_vsbciq_s32 (__a, __b, __carry_out);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbciq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
{
  return __arm_vsbciq_u32 (__a, __b, __carry_out);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbciq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
{
  return __arm_vsbciq_m_s32 (__inactive, __a, __b, __carry_out, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbciq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
{
  return __arm_vsbciq_m_u32 (__inactive, __a, __b, __carry_out, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbcq (int32x4_t __a, int32x4_t __b, unsigned * __carry)
{
  return __arm_vsbcq_s32 (__a, __b, __carry);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbcq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
{
  return __arm_vsbcq_u32 (__a, __b, __carry);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbcq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
{
  return __arm_vsbcq_m_s32 (__inactive, __a, __b, __carry, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsbcq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
{
  return __arm_vsbcq_m_u32 (__inactive, __a, __b, __carry, __p);
}
32374
/* 8-bit element overloads of the polymorphic load/store operations:
   predicated store (vst1q_p), interleaving stores (vst2q), predicated
   zero-fill load (vld1q_z) and de-interleaving loads (vld2q/vld4q).
   Each forwards to the _s8/_u8 suffixed intrinsic.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_p (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
{
  __arm_vst1q_p_u8 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_p (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
{
  __arm_vst1q_p_s8 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q (int8_t * __addr, int8x16x2_t __value)
{
  __arm_vst2q_s8 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q (uint8_t * __addr, uint8x16x2_t __value)
{
  __arm_vst2q_u8 (__addr, __value);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_z (uint8_t const *__base, mve_pred16_t __p)
{
  return __arm_vld1q_z_u8 (__base, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_z (int8_t const *__base, mve_pred16_t __p)
{
  return __arm_vld1q_z_s8 (__base, __p);
}

__extension__ extern __inline int8x16x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q (int8_t const * __addr)
{
  return __arm_vld2q_s8 (__addr);
}

__extension__ extern __inline uint8x16x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q (uint8_t const * __addr)
{
  return __arm_vld2q_u8 (__addr);
}

__extension__ extern __inline int8x16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q (int8_t const * __addr)
{
  return __arm_vld4q_s8 (__addr);
}

__extension__ extern __inline uint8x16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q (uint8_t const * __addr)
{
  return __arm_vld4q_u8 (__addr);
}
32444
/* 16-bit element overloads of the polymorphic load/store operations;
   same shape as the 8-bit group, forwarding to _s16/_u16 intrinsics.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_p (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
{
  __arm_vst1q_p_u16 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_p (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
{
  __arm_vst1q_p_s16 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q (int16_t * __addr, int16x8x2_t __value)
{
  __arm_vst2q_s16 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q (uint16_t * __addr, uint16x8x2_t __value)
{
  __arm_vst2q_u16 (__addr, __value);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_z (uint16_t const *__base, mve_pred16_t __p)
{
  return __arm_vld1q_z_u16 (__base, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_z (int16_t const *__base, mve_pred16_t __p)
{
  return __arm_vld1q_z_s16 (__base, __p);
}

__extension__ extern __inline int16x8x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q (int16_t const * __addr)
{
  return __arm_vld2q_s16 (__addr);
}

__extension__ extern __inline uint16x8x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q (uint16_t const * __addr)
{
  return __arm_vld2q_u16 (__addr);
}

__extension__ extern __inline int16x8x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q (int16_t const * __addr)
{
  return __arm_vld4q_s16 (__addr);
}

__extension__ extern __inline uint16x8x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q (uint16_t const * __addr)
{
  return __arm_vld4q_u16 (__addr);
}
32514
/* 32-bit element overloads of the polymorphic load/store operations;
   same shape as the 8-bit group, forwarding to _s32/_u32 intrinsics.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_p (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
{
  __arm_vst1q_p_u32 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_p (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
{
  __arm_vst1q_p_s32 (__addr, __value, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q (int32_t * __addr, int32x4x2_t __value)
{
  __arm_vst2q_s32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q (uint32_t * __addr, uint32x4x2_t __value)
{
  __arm_vst2q_u32 (__addr, __value);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_z (uint32_t const *__base, mve_pred16_t __p)
{
  return __arm_vld1q_z_u32 (__base, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_z (int32_t const *__base, mve_pred16_t __p)
{
  return __arm_vld1q_z_s32 (__base, __p);
}

__extension__ extern __inline int32x4x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q (int32_t const * __addr)
{
  return __arm_vld2q_s32 (__addr);
}

__extension__ extern __inline uint32x4x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q (uint32_t const * __addr)
{
  return __arm_vld2q_u32 (__addr);
}

__extension__ extern __inline int32x4x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q (int32_t const * __addr)
{
  return __arm_vld4q_s32 (__addr);
}

__extension__ extern __inline uint32x4x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q (uint32_t const * __addr)
{
  return __arm_vld4q_u32 (__addr);
}
32584
/* Overloads of __arm_vsetq_lane: insert scalar __a into lane __idx of
   vector __b; the scalar/vector type pair selects the suffixed
   intrinsic.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane (int16_t __a, int16x8_t __b, const int __idx)
{
  return __arm_vsetq_lane_s16 (__a, __b, __idx);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane (int32_t __a, int32x4_t __b, const int __idx)
{
  return __arm_vsetq_lane_s32 (__a, __b, __idx);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane (int8_t __a, int8x16_t __b, const int __idx)
{
  return __arm_vsetq_lane_s8 (__a, __b, __idx);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane (int64_t __a, int64x2_t __b, const int __idx)
{
  return __arm_vsetq_lane_s64 (__a, __b, __idx);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane (uint8_t __a, uint8x16_t __b, const int __idx)
{
  return __arm_vsetq_lane_u8 (__a, __b, __idx);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane (uint16_t __a, uint16x8_t __b, const int __idx)
{
  return __arm_vsetq_lane_u16 (__a, __b, __idx);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane (uint32_t __a, uint32x4_t __b, const int __idx)
{
  return __arm_vsetq_lane_u32 (__a, __b, __idx);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane (uint64_t __a, uint64x2_t __b, const int __idx)
{
  return __arm_vsetq_lane_u64 (__a, __b, __idx);
}
32640
/* Overloads of __arm_vgetq_lane: extract lane __idx of vector __a as
   the matching scalar type; the vector type selects the suffixed
   intrinsic.  */
__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane (int16x8_t __a, const int __idx)
{
  return __arm_vgetq_lane_s16 (__a, __idx);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane (int32x4_t __a, const int __idx)
{
  return __arm_vgetq_lane_s32 (__a, __idx);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane (int8x16_t __a, const int __idx)
{
  return __arm_vgetq_lane_s8 (__a, __idx);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane (int64x2_t __a, const int __idx)
{
  return __arm_vgetq_lane_s64 (__a, __idx);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane (uint8x16_t __a, const int __idx)
{
  return __arm_vgetq_lane_u8 (__a, __idx);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane (uint16x8_t __a, const int __idx)
{
  return __arm_vgetq_lane_u16 (__a, __idx);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane (uint32x4_t __a, const int __idx)
{
  return __arm_vgetq_lane_u32 (__a, __idx);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane (uint64x2_t __a, const int __idx)
{
  return __arm_vgetq_lane_u64 (__a, __idx);
}
32696
/* Overloads of the predicated __arm_vshlcq_m: __b points to the 32-bit
   carry state exchanged with the underlying intrinsic.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m (int8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlcq_m_s8 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m (uint8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlcq_m_u8 (__a, __b, __imm, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m (int16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlcq_m_s16 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m (uint16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlcq_m_u16 (__a, __b, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m (int32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlcq_m_s32 (__a, __b, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_m (uint32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
{
  return __arm_vshlcq_m_u32 (__a, __b, __imm, __p);
}
32738
32739 #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
32740
/* Floating-point overloads of __arm_vst4q (four-vector interleaving
   store), forwarding to the _f16/_f32 intrinsics.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q (float16_t * __addr, float16x8x4_t __value)
{
  __arm_vst4q_f16 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst4q (float32_t * __addr, float32x4x4_t __value)
{
  __arm_vst4q_f32 (__addr, __value);
}
32754
/* Floating-point overloads of the vector rounding family (vrndxq,
   vrndq, vrndpq, vrndnq, vrndmq, vrndaq); each pair forwards to the
   _f16/_f32 suffixed intrinsic for its argument type.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq (float16x8_t __a)
{
  return __arm_vrndxq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq (float32x4_t __a)
{
  return __arm_vrndxq_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq (float16x8_t __a)
{
  return __arm_vrndq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndq (float32x4_t __a)
{
  return __arm_vrndq_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq (float16x8_t __a)
{
  return __arm_vrndpq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndpq (float32x4_t __a)
{
  return __arm_vrndpq_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq (float16x8_t __a)
{
  return __arm_vrndnq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndnq (float32x4_t __a)
{
  return __arm_vrndnq_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq (float16x8_t __a)
{
  return __arm_vrndmq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndmq (float32x4_t __a)
{
  return __arm_vrndmq_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq (float16x8_t __a)
{
  return __arm_vrndaq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndaq (float32x4_t __a)
{
  return __arm_vrndaq_f32 (__a);
}
32838
/* Overload wrappers for element-permute and unary arithmetic intrinsics:
     vrev64q  - reverse element order within each 64-bit chunk;
     vnegq    - negate each element;
     vdupq_n  - broadcast a scalar to all lanes;
     vabsq    - absolute value of each element;
     vrev32q  - reverse element order within each 32-bit chunk
                (f16 only: a 32-bit chunk holds two f16 lanes).  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q (float16x8_t __a)
{
  return __arm_vrev64q_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q (float32x4_t __a)
{
  return __arm_vrev64q_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq (float16x8_t __a)
{
  return __arm_vnegq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq (float32x4_t __a)
{
  return __arm_vnegq_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n (float16_t __a)
{
  return __arm_vdupq_n_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_n (float32_t __a)
{
  return __arm_vdupq_n_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq (float16x8_t __a)
{
  return __arm_vabsq_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq (float32x4_t __a)
{
  return __arm_vabsq_f32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q (float16x8_t __a)
{
  return __arm_vrev32q_f16 (__a);
}
32901
/* Overload wrappers for float conversions:
     vcvttq_f32 / vcvtbq_f32 - widen the top/bottom f16 element of each
                               32-bit pair to f32 (suffix order is
                               <result>_<source>);
     vcvtq - convert signed/unsigned integer lanes to floating point of
             the same lane width.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_f32 (float16x8_t __a)
{
  return __arm_vcvttq_f32_f16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_f32 (float16x8_t __a)
{
  return __arm_vcvtbq_f32_f16 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq (int16x8_t __a)
{
  return __arm_vcvtq_f16_s16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq (int32x4_t __a)
{
  return __arm_vcvtq_f32_s32 (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq (uint16x8_t __a)
{
  return __arm_vcvtq_f16_u16 (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq (uint32x4_t __a)
{
  return __arm_vcvtq_f32_u32 (__a);
}
32943
/* Overload wrappers taking a scalar or immediate second operand:
     vsubq (vector, scalar) - subtract a broadcast scalar (_n form);
     vbrsrq - per ACLE, bit-reverse the low __b bits of each element;
     vcvtq_n - fixed-point integer -> float conversion; __imm6 is the
               number of fraction bits (a compile-time constant, range
               checked by the underlying builtin).  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (float16x8_t __a, float16_t __b)
{
  return __arm_vsubq_n_f16 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (float32x4_t __a, float32_t __b)
{
  return __arm_vsubq_n_f32 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq (float16x8_t __a, int32_t __b)
{
  return __arm_vbrsrq_n_f16 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq (float32x4_t __a, int32_t __b)
{
  return __arm_vbrsrq_n_f32 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n (int16x8_t __a, const int __imm6)
{
  return __arm_vcvtq_n_f16_s16 (__a, __imm6);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n (int32x4_t __a, const int __imm6)
{
  return __arm_vcvtq_n_f32_s32 (__a, __imm6);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n (uint16x8_t __a, const int __imm6)
{
  return __arm_vcvtq_n_f16_u16 (__a, __imm6);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_n (uint32x4_t __a, const int __imm6)
{
  return __arm_vcvtq_n_f32_u32 (__a, __imm6);
}
32999
/* Overload wrappers for the float16 vector comparisons.  Each produces a
   lane predicate (mve_pred16_t).  The (vector, scalar) overloads compare
   every lane against a broadcast scalar (_n form); the (vector, vector)
   overloads compare lane-wise.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (float16x8_t __a, float16_t __b)
{
  return __arm_vcmpneq_n_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmpneq_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (float16x8_t __a, float16_t __b)
{
  return __arm_vcmpltq_n_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmpltq_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (float16x8_t __a, float16_t __b)
{
  return __arm_vcmpleq_n_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmpleq_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (float16x8_t __a, float16_t __b)
{
  return __arm_vcmpgtq_n_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmpgtq_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (float16x8_t __a, float16_t __b)
{
  return __arm_vcmpgeq_n_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmpgeq_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (float16x8_t __a, float16_t __b)
{
  return __arm_vcmpeqq_n_f16 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmpeqq_f16 (__a, __b);
}
33083
/* Overload wrappers for float16 binary arithmetic/logic and the
   NaN-suppressing min/max family:
     vsubq/vmulq - lane-wise subtract/multiply (the scalar-__b forms
                   dispatch to the _n variants);
     vorrq/vornq - bitwise OR / OR-complement of the raw vector bits;
     vminnmq/vmaxnmq   - lane-wise FP minnum/maxnum;
     vminnmvq/vmaxnmvq - reduce across lanes, seeded with scalar __a;
     vminnmavq/vmaxnmavq, vminnmaq/vmaxnmaq - absolute-value forms.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vsubq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vorrq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vornq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (float16x8_t __a, float16_t __b)
{
  return __arm_vmulq_n_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vmulq_f16 (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq (float16_t __a, float16x8_t __b)
{
  return __arm_vminnmvq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vminnmq_f16 (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq (float16_t __a, float16x8_t __b)
{
  return __arm_vminnmavq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vminnmaq_f16 (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq (float16_t __a, float16x8_t __b)
{
  return __arm_vmaxnmvq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vmaxnmq_f16 (__a, __b);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq (float16_t __a, float16x8_t __b)
{
  return __arm_vmaxnmavq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vmaxnmaq_f16 (__a, __b);
}
33174
/* Overload wrappers for float16 bitwise and complex-arithmetic intrinsics:
     veorq/vbicq/vandq - bitwise XOR / AND-complement / AND of raw bits;
     vcmulq[_rotN]     - complex multiply with the second operand rotated
                         by 0/90/180/270 degrees (lane pairs hold
                         real/imaginary parts, per ACLE);
     vcaddq_rot90/270  - complex add with rotation;
     vaddq (vector, scalar) - add a broadcast scalar (_n form);
     vabdq             - lane-wise absolute difference.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq (float16x8_t __a, float16x8_t __b)
{
  return __arm_veorq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90 (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmulq_rot90_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270 (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmulq_rot270_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180 (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmulq_rot180_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcmulq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90 (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcaddq_rot90_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270 (float16x8_t __a, float16x8_t __b)
{
  return __arm_vcaddq_rot270_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vbicq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vandq_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (float16x8_t __a, float16_t __b)
{
  return __arm_vaddq_n_f16 (__a, __b);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq (float16x8_t __a, float16x8_t __b)
{
  return __arm_vabdq_f16 (__a, __b);
}
33251
/* Overload wrappers for the float32 vector comparisons — f32 counterparts
   of the f16 comparison overloads earlier in this file.  Each produces a
   lane predicate (mve_pred16_t); the scalar-__b forms use the broadcast
   _n variants.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (float32x4_t __a, float32_t __b)
{
  return __arm_vcmpneq_n_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpneq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmpneq_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (float32x4_t __a, float32_t __b)
{
  return __arm_vcmpltq_n_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpltq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmpltq_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (float32x4_t __a, float32_t __b)
{
  return __arm_vcmpleq_n_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpleq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmpleq_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (float32x4_t __a, float32_t __b)
{
  return __arm_vcmpgtq_n_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgtq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmpgtq_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (float32x4_t __a, float32_t __b)
{
  return __arm_vcmpgeq_n_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpgeq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmpgeq_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (float32x4_t __a, float32_t __b)
{
  return __arm_vcmpeqq_n_f32 (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmpeqq_f32 (__a, __b);
}
33335
/* Overload wrappers for float32 binary arithmetic/logic and the
   NaN-suppressing min/max family — f32 counterparts of the f16 overloads
   earlier in this file.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsubq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vsubq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vorrq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vornq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (float32x4_t __a, float32_t __b)
{
  return __arm_vmulq_n_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmulq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vmulq_f32 (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq (float32_t __a, float32x4_t __b)
{
  return __arm_vminnmvq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vminnmq_f32 (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq (float32_t __a, float32x4_t __b)
{
  return __arm_vminnmavq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vminnmaq_f32 (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq (float32_t __a, float32x4_t __b)
{
  return __arm_vmaxnmvq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vmaxnmq_f32 (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq (float32_t __a, float32x4_t __b)
{
  return __arm_vmaxnmavq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vmaxnmaq_f32 (__a, __b);
}
33426
/* Overload wrappers for float32 bitwise and complex-arithmetic
   intrinsics — f32 counterparts of the f16 overloads earlier in this
   file (veorq/vbicq/vandq bitwise; vcmulq[_rotN]/vcaddq_rotN complex;
   vaddq scalar broadcast; vabdq absolute difference).  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq (float32x4_t __a, float32x4_t __b)
{
  return __arm_veorq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot90 (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmulq_rot90_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot270 (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmulq_rot270_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq_rot180 (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmulq_rot180_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmulq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcmulq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot90 (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcaddq_rot90_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcaddq_rot270 (float32x4_t __a, float32x4_t __b)
{
  return __arm_vcaddq_rot270_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vbicq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vandq_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vaddq (float32x4_t __a, float32_t __b)
{
  return __arm_vaddq_n_f32 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabdq (float32x4_t __a, float32x4_t __b)
{
  return __arm_vabdq_f32 (__a, __b);
}
33503
/* Overload wrappers for predicated (_m, "merging") operations.  Each
   takes a mve_pred16_t __p; per ACLE, inactive lanes take their value
   from __inactive (or __a for the f16<->f32 half-width forms).
     vcmpeqq_m - predicated lane-wise equality compare;
     vcvtaq_m  - float -> integer, rounding to nearest, ties away;
     vcvtq_m   - integer -> float conversion;
     vcvtbq_m / vcvttq_m - convert between f32 and the bottom/top f16
                 elements of each 32-bit pair.  */

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_f16 (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcmpeqq_m_f32 (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvtaq_m_s16_f16 (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvtaq_m_u16_f16 (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vcvtaq_m_s32_f32 (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vcvtaq_m_u32_f32 (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m (float16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvtq_m_f16_s16 (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m (float16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvtq_m_f16_u16 (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m (float32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __arm_vcvtq_m_f32_s32 (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m (float32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __arm_vcvtq_m_f32_u32 (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_m (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcvtbq_m_f16_f32 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtbq_m (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvtbq_m_f32_f16 (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_m (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vcvttq_m_f16_f32 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvttq_m (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvttq_m_f32_f16 (__inactive, __a, __p);
}
33601
/* Overload wrappers for predicated permute and fused multiply families
   (float16):
     vrev32q_m - predicated reverse within 32-bit chunks, merging from
                 __inactive;
     vcmlaq[_rotN] - complex multiply-accumulate into __a, with the
                 third operand rotated by 0/90/180/270 degrees;
     vfmaq  - fused multiply-add (__a + __b * __c); the scalar-__c form
              dispatches to the broadcast _n variant;
     vfmasq - fused multiply with scalar addend (__a * __b + __c);
     vfmsq  - fused multiply-subtract.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrev32q_m_f16 (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __arm_vcmlaq_f16 (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot180 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __arm_vcmlaq_rot180_f16 (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot270 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __arm_vcmlaq_rot270_f16 (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmlaq_rot90 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __arm_vcmlaq_rot90_f16 (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __arm_vfmaq_f16 (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmaq (float16x8_t __a, float16x8_t __b, float16_t __c)
{
  return __arm_vfmaq_n_f16 (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmasq (float16x8_t __a, float16x8_t __b, float16_t __c)
{
  return __arm_vfmasq_n_f16 (__a, __b, __c);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vfmsq (float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __arm_vfmsq_f16 (__a, __b, __c);
}
33664
/* Overload wrappers for predicated unary operations, predicated
   reductions and select (float16).  The _m forms merge inactive lanes
   from __inactive; the _p reduction forms only accumulate active lanes.
   Per ACLE, the float -> int conversions round as follows:
     vcvtmq_m - toward -infinity;  vcvtnq_m - to nearest, ties even;
     vcvtpq_m - toward +infinity;  vcvtq_m  - toward zero.
   vpselq selects lane-wise between __a (predicate set) and __b.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabsq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vabsq_m_f16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtmq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvtmq_m_s16_f16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtnq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvtnq_m_s16_f16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtpq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvtpq_m_s16_f16 (__inactive, __a, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vcvtq_m_s16_f16 (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vdupq_m (float16x8_t __inactive, float16_t __a, mve_pred16_t __p)
{
  return __arm_vdupq_m_n_f16 (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmaq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxnmaq_m_f16 (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmavq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxnmavq_p_f16 (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vmaxnmvq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vmaxnmvq_p_f16 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmaq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminnmaq_m_f16 (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmavq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminnmavq_p_f16 (__a, __b, __p);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vminnmvq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vminnmvq_p_f16 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vnegq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vnegq_m_f16 (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vpselq (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vpselq_f16 (__a, __b, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_m_f16 (__inactive, __a, __p);
}
33769
33770 __extension__ extern __inline float16x8_t
33771 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33772 __arm_vrndaq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33773 {
33774 return __arm_vrndaq_m_f16 (__inactive, __a, __p);
33775 }
33776
33777 __extension__ extern __inline float16x8_t
33778 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33779 __arm_vrndmq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33780 {
33781 return __arm_vrndmq_m_f16 (__inactive, __a, __p);
33782 }
33783
33784 __extension__ extern __inline float16x8_t
33785 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33786 __arm_vrndnq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33787 {
33788 return __arm_vrndnq_m_f16 (__inactive, __a, __p);
33789 }
33790
33791 __extension__ extern __inline float16x8_t
33792 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33793 __arm_vrndpq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33794 {
33795 return __arm_vrndpq_m_f16 (__inactive, __a, __p);
33796 }
33797
33798 __extension__ extern __inline float16x8_t
33799 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33800 __arm_vrndq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33801 {
33802 return __arm_vrndq_m_f16 (__inactive, __a, __p);
33803 }
33804
33805 __extension__ extern __inline float16x8_t
33806 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33807 __arm_vrndxq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33808 {
33809 return __arm_vrndxq_m_f16 (__inactive, __a, __p);
33810 }
33811
33812 __extension__ extern __inline mve_pred16_t
33813 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33814 __arm_vcmpeqq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
33815 {
33816 return __arm_vcmpeqq_m_n_f16 (__a, __b, __p);
33817 }
33818
33819 __extension__ extern __inline mve_pred16_t
33820 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33821 __arm_vcmpgeq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
33822 {
33823 return __arm_vcmpgeq_m_f16 (__a, __b, __p);
33824 }
33825
33826 __extension__ extern __inline mve_pred16_t
33827 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33828 __arm_vcmpgeq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
33829 {
33830 return __arm_vcmpgeq_m_n_f16 (__a, __b, __p);
33831 }
33832
33833 __extension__ extern __inline mve_pred16_t
33834 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33835 __arm_vcmpgtq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
33836 {
33837 return __arm_vcmpgtq_m_f16 (__a, __b, __p);
33838 }
33839
33840 __extension__ extern __inline mve_pred16_t
33841 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33842 __arm_vcmpgtq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
33843 {
33844 return __arm_vcmpgtq_m_n_f16 (__a, __b, __p);
33845 }
33846
33847 __extension__ extern __inline mve_pred16_t
33848 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33849 __arm_vcmpleq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
33850 {
33851 return __arm_vcmpleq_m_f16 (__a, __b, __p);
33852 }
33853
33854 __extension__ extern __inline mve_pred16_t
33855 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33856 __arm_vcmpleq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
33857 {
33858 return __arm_vcmpleq_m_n_f16 (__a, __b, __p);
33859 }
33860
33861 __extension__ extern __inline mve_pred16_t
33862 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33863 __arm_vcmpltq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
33864 {
33865 return __arm_vcmpltq_m_f16 (__a, __b, __p);
33866 }
33867
33868 __extension__ extern __inline mve_pred16_t
33869 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33870 __arm_vcmpltq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
33871 {
33872 return __arm_vcmpltq_m_n_f16 (__a, __b, __p);
33873 }
33874
33875 __extension__ extern __inline mve_pred16_t
33876 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33877 __arm_vcmpneq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
33878 {
33879 return __arm_vcmpneq_m_f16 (__a, __b, __p);
33880 }
33881
33882 __extension__ extern __inline mve_pred16_t
33883 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33884 __arm_vcmpneq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
33885 {
33886 return __arm_vcmpneq_m_n_f16 (__a, __b, __p);
33887 }
33888
33889 __extension__ extern __inline uint16x8_t
33890 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33891 __arm_vcvtmq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33892 {
33893 return __arm_vcvtmq_m_u16_f16 (__inactive, __a, __p);
33894 }
33895
33896 __extension__ extern __inline uint16x8_t
33897 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33898 __arm_vcvtnq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33899 {
33900 return __arm_vcvtnq_m_u16_f16 (__inactive, __a, __p);
33901 }
33902
33903 __extension__ extern __inline uint16x8_t
33904 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33905 __arm_vcvtpq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33906 {
33907 return __arm_vcvtpq_m_u16_f16 (__inactive, __a, __p);
33908 }
33909
33910 __extension__ extern __inline uint16x8_t
33911 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33912 __arm_vcvtq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
33913 {
33914 return __arm_vcvtq_m_u16_f16 (__inactive, __a, __p);
33915 }
33916
/* Polymorphic wrappers for float32x4_t complex multiply-accumulate
   (vcmlaq, with 0/90/180/270-degree rotations of the second operand per
   ACLE), fused multiply-add/subtract, and predicated absolute value and
   float->int32 conversions.  Each forwards to the "_f32"-suffixed
   intrinsic; overloads taking a float32_t third operand dispatch to the
   "_n" scalar-operand variant.  */
33917 __extension__ extern __inline float32x4_t
33918 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33919 __arm_vcmlaq (float32x4_t __a, float32x4_t __b, float32x4_t __c)
33920 {
33921 return __arm_vcmlaq_f32 (__a, __b, __c);
33922 }
33923
33924 __extension__ extern __inline float32x4_t
33925 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33926 __arm_vcmlaq_rot180 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
33927 {
33928 return __arm_vcmlaq_rot180_f32 (__a, __b, __c);
33929 }
33930
33931 __extension__ extern __inline float32x4_t
33932 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33933 __arm_vcmlaq_rot270 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
33934 {
33935 return __arm_vcmlaq_rot270_f32 (__a, __b, __c);
33936 }
33937
33938 __extension__ extern __inline float32x4_t
33939 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33940 __arm_vcmlaq_rot90 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
33941 {
33942 return __arm_vcmlaq_rot90_f32 (__a, __b, __c);
33943 }
33944
33945 __extension__ extern __inline float32x4_t
33946 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33947 __arm_vfmaq (float32x4_t __a, float32x4_t __b, float32x4_t __c)
33948 {
33949 return __arm_vfmaq_f32 (__a, __b, __c);
33950 }
33951
33952 __extension__ extern __inline float32x4_t
33953 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33954 __arm_vfmaq (float32x4_t __a, float32x4_t __b, float32_t __c)
33955 {
33956 return __arm_vfmaq_n_f32 (__a, __b, __c);
33957 }
33958
33959 __extension__ extern __inline float32x4_t
33960 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33961 __arm_vfmasq (float32x4_t __a, float32x4_t __b, float32_t __c)
33962 {
33963 return __arm_vfmasq_n_f32 (__a, __b, __c);
33964 }
33965
33966 __extension__ extern __inline float32x4_t
33967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33968 __arm_vfmsq (float32x4_t __a, float32x4_t __b, float32x4_t __c)
33969 {
33970 return __arm_vfmsq_f32 (__a, __b, __c);
33971 }
33972
/* "_m" = merging predication: inactive lanes take __inactive.  */
33973 __extension__ extern __inline float32x4_t
33974 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33975 __arm_vabsq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
33976 {
33977 return __arm_vabsq_m_f32 (__inactive, __a, __p);
33978 }
33979
/* Predicated float32 -> int32 conversions; vcvtm/vcvtn/vcvtp/vcvt round
   toward -inf / to nearest even / toward +inf / per current mode.  */
33980 __extension__ extern __inline int32x4_t
33981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33982 __arm_vcvtmq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
33983 {
33984 return __arm_vcvtmq_m_s32_f32 (__inactive, __a, __p);
33985 }
33986
33987 __extension__ extern __inline int32x4_t
33988 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33989 __arm_vcvtnq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
33990 {
33991 return __arm_vcvtnq_m_s32_f32 (__inactive, __a, __p);
33992 }
33993
33994 __extension__ extern __inline int32x4_t
33995 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33996 __arm_vcvtpq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
33997 {
33998 return __arm_vcvtpq_m_s32_f32 (__inactive, __a, __p);
33999 }
34000
34001 __extension__ extern __inline int32x4_t
34002 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34003 __arm_vcvtq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34004 {
34005 return __arm_vcvtq_m_s32_f32 (__inactive, __a, __p);
34006 }
34007
/* float32x4_t counterparts of the float16 polymorphic wrappers above:
   predicated lane-wise operations, "_p" across-vector reductions,
   predicated comparisons (returning an mve_pred16_t lane mask), and
   predicated float32 -> uint32 conversions.  Scalar-operand overloads
   dispatch to the "_n" intrinsic.  */
34008 __extension__ extern __inline float32x4_t
34009 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34010 __arm_vdupq_m (float32x4_t __inactive, float32_t __a, mve_pred16_t __p)
34011 {
34012 return __arm_vdupq_m_n_f32 (__inactive, __a, __p);
34013 }
34014
34015 __extension__ extern __inline float32x4_t
34016 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34017 __arm_vmaxnmaq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34018 {
34019 return __arm_vmaxnmaq_m_f32 (__a, __b, __p);
34020 }
34021
34022 __extension__ extern __inline float32_t
34023 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34024 __arm_vmaxnmavq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p)
34025 {
34026 return __arm_vmaxnmavq_p_f32 (__a, __b, __p);
34027 }
34028
34029 __extension__ extern __inline float32_t
34030 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34031 __arm_vmaxnmvq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p)
34032 {
34033 return __arm_vmaxnmvq_p_f32 (__a, __b, __p);
34034 }
34035
34036 __extension__ extern __inline float32x4_t
34037 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34038 __arm_vminnmaq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34039 {
34040 return __arm_vminnmaq_m_f32 (__a, __b, __p);
34041 }
34042
34043 __extension__ extern __inline float32_t
34044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34045 __arm_vminnmavq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p)
34046 {
34047 return __arm_vminnmavq_p_f32 (__a, __b, __p);
34048 }
34049
34050 __extension__ extern __inline float32_t
34051 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34052 __arm_vminnmvq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p)
34053 {
34054 return __arm_vminnmvq_p_f32 (__a, __b, __p);
34055 }
34056
34057 __extension__ extern __inline float32x4_t
34058 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34059 __arm_vnegq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34060 {
34061 return __arm_vnegq_m_f32 (__inactive, __a, __p);
34062 }
34063
34064 __extension__ extern __inline float32x4_t
34065 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34066 __arm_vpselq (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34067 {
34068 return __arm_vpselq_f32 (__a, __b, __p);
34069 }
34070
34071 __extension__ extern __inline float32x4_t
34072 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34073 __arm_vrev64q_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34074 {
34075 return __arm_vrev64q_m_f32 (__inactive, __a, __p);
34076 }
34077
/* Predicated round-to-integral family (rounding-mode suffixes as for
   the f16 variants earlier in the file).  */
34078 __extension__ extern __inline float32x4_t
34079 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34080 __arm_vrndaq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34081 {
34082 return __arm_vrndaq_m_f32 (__inactive, __a, __p);
34083 }
34084
34085 __extension__ extern __inline float32x4_t
34086 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34087 __arm_vrndmq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34088 {
34089 return __arm_vrndmq_m_f32 (__inactive, __a, __p);
34090 }
34091
34092 __extension__ extern __inline float32x4_t
34093 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34094 __arm_vrndnq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34095 {
34096 return __arm_vrndnq_m_f32 (__inactive, __a, __p);
34097 }
34098
34099 __extension__ extern __inline float32x4_t
34100 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34101 __arm_vrndpq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34102 {
34103 return __arm_vrndpq_m_f32 (__inactive, __a, __p);
34104 }
34105
34106 __extension__ extern __inline float32x4_t
34107 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34108 __arm_vrndq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34109 {
34110 return __arm_vrndq_m_f32 (__inactive, __a, __p);
34111 }
34112
34113 __extension__ extern __inline float32x4_t
34114 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34115 __arm_vrndxq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34116 {
34117 return __arm_vrndxq_m_f32 (__inactive, __a, __p);
34118 }
34119
/* Predicated float32 comparisons; result is a lane predicate.  */
34120 __extension__ extern __inline mve_pred16_t
34121 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34122 __arm_vcmpeqq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
34123 {
34124 return __arm_vcmpeqq_m_n_f32 (__a, __b, __p);
34125 }
34126
34127 __extension__ extern __inline mve_pred16_t
34128 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34129 __arm_vcmpgeq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34130 {
34131 return __arm_vcmpgeq_m_f32 (__a, __b, __p);
34132 }
34133
34134 __extension__ extern __inline mve_pred16_t
34135 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34136 __arm_vcmpgeq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
34137 {
34138 return __arm_vcmpgeq_m_n_f32 (__a, __b, __p);
34139 }
34140
34141 __extension__ extern __inline mve_pred16_t
34142 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34143 __arm_vcmpgtq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34144 {
34145 return __arm_vcmpgtq_m_f32 (__a, __b, __p);
34146 }
34147
34148 __extension__ extern __inline mve_pred16_t
34149 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34150 __arm_vcmpgtq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
34151 {
34152 return __arm_vcmpgtq_m_n_f32 (__a, __b, __p);
34153 }
34154
34155 __extension__ extern __inline mve_pred16_t
34156 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34157 __arm_vcmpleq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34158 {
34159 return __arm_vcmpleq_m_f32 (__a, __b, __p);
34160 }
34161
34162 __extension__ extern __inline mve_pred16_t
34163 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34164 __arm_vcmpleq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
34165 {
34166 return __arm_vcmpleq_m_n_f32 (__a, __b, __p);
34167 }
34168
34169 __extension__ extern __inline mve_pred16_t
34170 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34171 __arm_vcmpltq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34172 {
34173 return __arm_vcmpltq_m_f32 (__a, __b, __p);
34174 }
34175
34176 __extension__ extern __inline mve_pred16_t
34177 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34178 __arm_vcmpltq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
34179 {
34180 return __arm_vcmpltq_m_n_f32 (__a, __b, __p);
34181 }
34182
34183 __extension__ extern __inline mve_pred16_t
34184 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34185 __arm_vcmpneq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34186 {
34187 return __arm_vcmpneq_m_f32 (__a, __b, __p);
34188 }
34189
34190 __extension__ extern __inline mve_pred16_t
34191 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34192 __arm_vcmpneq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
34193 {
34194 return __arm_vcmpneq_m_n_f32 (__a, __b, __p);
34195 }
34196
/* Predicated float32 -> uint32 conversions.  */
34197 __extension__ extern __inline uint32x4_t
34198 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34199 __arm_vcvtmq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34200 {
34201 return __arm_vcvtmq_m_u32_f32 (__inactive, __a, __p);
34202 }
34203
34204 __extension__ extern __inline uint32x4_t
34205 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34206 __arm_vcvtnq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34207 {
34208 return __arm_vcvtnq_m_u32_f32 (__inactive, __a, __p);
34209 }
34210
34211 __extension__ extern __inline uint32x4_t
34212 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34213 __arm_vcvtpq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34214 {
34215 return __arm_vcvtpq_m_u32_f32 (__inactive, __a, __p);
34216 }
34217
34218 __extension__ extern __inline uint32x4_t
34219 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34220 __arm_vcvtq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
34221 {
34222 return __arm_vcvtq_m_u32_f32 (__inactive, __a, __p);
34223 }
34224
/* Predicated fixed-point integer -> float conversions with an explicit
   fractional-bit count __imm6 (compile-time constant; valid range per
   ACLE — NOTE(review): 1..16 / 1..32 by element size, confirm).
   Overload dispatch is on the integer source element type.  */
34225 __extension__ extern __inline float16x8_t
34226 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34227 __arm_vcvtq_m_n (float16x8_t __inactive, uint16x8_t __a, const int __imm6, mve_pred16_t __p)
34228 {
34229 return __arm_vcvtq_m_n_f16_u16 (__inactive, __a, __imm6, __p);
34230 }
34231
34232 __extension__ extern __inline float16x8_t
34233 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34234 __arm_vcvtq_m_n (float16x8_t __inactive, int16x8_t __a, const int __imm6, mve_pred16_t __p)
34235 {
34236 return __arm_vcvtq_m_n_f16_s16 (__inactive, __a, __imm6, __p);
34237 }
34238
34239 __extension__ extern __inline float32x4_t
34240 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34241 __arm_vcvtq_m_n (float32x4_t __inactive, uint32x4_t __a, const int __imm6, mve_pred16_t __p)
34242 {
34243 return __arm_vcvtq_m_n_f32_u32 (__inactive, __a, __imm6, __p);
34244 }
34245
34246 __extension__ extern __inline float32x4_t
34247 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34248 __arm_vcvtq_m_n (float32x4_t __inactive, int32x4_t __a, const int __imm6, mve_pred16_t __p)
34249 {
34250 return __arm_vcvtq_m_n_f32_s32 (__inactive, __a, __imm6, __p);
34251 }
34252
/* Predicated ("_m") binary and ternary float operations, each with f32
   and f16 overloads: absolute difference, add (vector and "_n" scalar),
   bitwise AND / AND-NOT (vbicq), bit-reverse-shift (vbrsrq), complex
   add/multiply-accumulate/multiply with 90-degree-multiple rotations,
   and fixed-point float -> integer conversions (vcvtq_m_n).  For the
   vabdq/vaddq/vandq/vbicq/vbrsrq/vcaddq/vcmulq forms, inactive lanes
   take __inactive; for the accumulating vcmlaq_m forms the accumulator
   __a is both input and merge source.  */
34253 __extension__ extern __inline float32x4_t
34254 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34255 __arm_vabdq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34256 {
34257 return __arm_vabdq_m_f32 (__inactive, __a, __b, __p);
34258 }
34259
34260 __extension__ extern __inline float16x8_t
34261 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34262 __arm_vabdq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34263 {
34264 return __arm_vabdq_m_f16 (__inactive, __a, __b, __p);
34265 }
34266
34267 __extension__ extern __inline float32x4_t
34268 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34269 __arm_vaddq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34270 {
34271 return __arm_vaddq_m_f32 (__inactive, __a, __b, __p);
34272 }
34273
34274 __extension__ extern __inline float16x8_t
34275 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34276 __arm_vaddq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34277 {
34278 return __arm_vaddq_m_f16 (__inactive, __a, __b, __p);
34279 }
34280
34281 __extension__ extern __inline float32x4_t
34282 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34283 __arm_vaddq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
34284 {
34285 return __arm_vaddq_m_n_f32 (__inactive, __a, __b, __p);
34286 }
34287
34288 __extension__ extern __inline float16x8_t
34289 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34290 __arm_vaddq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
34291 {
34292 return __arm_vaddq_m_n_f16 (__inactive, __a, __b, __p);
34293 }
34294
34295 __extension__ extern __inline float32x4_t
34296 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34297 __arm_vandq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34298 {
34299 return __arm_vandq_m_f32 (__inactive, __a, __b, __p);
34300 }
34301
34302 __extension__ extern __inline float16x8_t
34303 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34304 __arm_vandq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34305 {
34306 return __arm_vandq_m_f16 (__inactive, __a, __b, __p);
34307 }
34308
34309 __extension__ extern __inline float32x4_t
34310 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34311 __arm_vbicq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34312 {
34313 return __arm_vbicq_m_f32 (__inactive, __a, __b, __p);
34314 }
34315
34316 __extension__ extern __inline float16x8_t
34317 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34318 __arm_vbicq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34319 {
34320 return __arm_vbicq_m_f16 (__inactive, __a, __b, __p);
34321 }
34322
/* vbrsrq takes a scalar int32_t shift operand — always the "_n" form.  */
34323 __extension__ extern __inline float32x4_t
34324 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34325 __arm_vbrsrq_m (float32x4_t __inactive, float32x4_t __a, int32_t __b, mve_pred16_t __p)
34326 {
34327 return __arm_vbrsrq_m_n_f32 (__inactive, __a, __b, __p);
34328 }
34329
34330 __extension__ extern __inline float16x8_t
34331 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34332 __arm_vbrsrq_m (float16x8_t __inactive, float16x8_t __a, int32_t __b, mve_pred16_t __p)
34333 {
34334 return __arm_vbrsrq_m_n_f16 (__inactive, __a, __b, __p);
34335 }
34336
34337 __extension__ extern __inline float32x4_t
34338 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34339 __arm_vcaddq_rot270_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34340 {
34341 return __arm_vcaddq_rot270_m_f32 (__inactive, __a, __b, __p);
34342 }
34343
34344 __extension__ extern __inline float16x8_t
34345 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34346 __arm_vcaddq_rot270_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34347 {
34348 return __arm_vcaddq_rot270_m_f16 (__inactive, __a, __b, __p);
34349 }
34350
34351 __extension__ extern __inline float32x4_t
34352 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34353 __arm_vcaddq_rot90_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34354 {
34355 return __arm_vcaddq_rot90_m_f32 (__inactive, __a, __b, __p);
34356 }
34357
34358 __extension__ extern __inline float16x8_t
34359 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34360 __arm_vcaddq_rot90_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34361 {
34362 return __arm_vcaddq_rot90_m_f16 (__inactive, __a, __b, __p);
34363 }
34364
34365 __extension__ extern __inline float32x4_t
34366 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34367 __arm_vcmlaq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
34368 {
34369 return __arm_vcmlaq_m_f32 (__a, __b, __c, __p);
34370 }
34371
34372 __extension__ extern __inline float16x8_t
34373 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34374 __arm_vcmlaq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
34375 {
34376 return __arm_vcmlaq_m_f16 (__a, __b, __c, __p);
34377 }
34378
34379 __extension__ extern __inline float32x4_t
34380 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34381 __arm_vcmlaq_rot180_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
34382 {
34383 return __arm_vcmlaq_rot180_m_f32 (__a, __b, __c, __p);
34384 }
34385
34386 __extension__ extern __inline float16x8_t
34387 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34388 __arm_vcmlaq_rot180_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
34389 {
34390 return __arm_vcmlaq_rot180_m_f16 (__a, __b, __c, __p);
34391 }
34392
34393 __extension__ extern __inline float32x4_t
34394 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34395 __arm_vcmlaq_rot270_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
34396 {
34397 return __arm_vcmlaq_rot270_m_f32 (__a, __b, __c, __p);
34398 }
34399
34400 __extension__ extern __inline float16x8_t
34401 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34402 __arm_vcmlaq_rot270_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
34403 {
34404 return __arm_vcmlaq_rot270_m_f16 (__a, __b, __c, __p);
34405 }
34406
34407 __extension__ extern __inline float32x4_t
34408 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34409 __arm_vcmlaq_rot90_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
34410 {
34411 return __arm_vcmlaq_rot90_m_f32 (__a, __b, __c, __p);
34412 }
34413
34414 __extension__ extern __inline float16x8_t
34415 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34416 __arm_vcmlaq_rot90_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
34417 {
34418 return __arm_vcmlaq_rot90_m_f16 (__a, __b, __c, __p);
34419 }
34420
34421 __extension__ extern __inline float32x4_t
34422 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34423 __arm_vcmulq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34424 {
34425 return __arm_vcmulq_m_f32 (__inactive, __a, __b, __p);
34426 }
34427
34428 __extension__ extern __inline float16x8_t
34429 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34430 __arm_vcmulq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34431 {
34432 return __arm_vcmulq_m_f16 (__inactive, __a, __b, __p);
34433 }
34434
34435 __extension__ extern __inline float32x4_t
34436 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34437 __arm_vcmulq_rot180_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34438 {
34439 return __arm_vcmulq_rot180_m_f32 (__inactive, __a, __b, __p);
34440 }
34441
34442 __extension__ extern __inline float16x8_t
34443 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34444 __arm_vcmulq_rot180_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34445 {
34446 return __arm_vcmulq_rot180_m_f16 (__inactive, __a, __b, __p);
34447 }
34448
34449 __extension__ extern __inline float32x4_t
34450 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34451 __arm_vcmulq_rot270_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34452 {
34453 return __arm_vcmulq_rot270_m_f32 (__inactive, __a, __b, __p);
34454 }
34455
34456 __extension__ extern __inline float16x8_t
34457 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34458 __arm_vcmulq_rot270_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34459 {
34460 return __arm_vcmulq_rot270_m_f16 (__inactive, __a, __b, __p);
34461 }
34462
34463 __extension__ extern __inline float32x4_t
34464 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34465 __arm_vcmulq_rot90_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34466 {
34467 return __arm_vcmulq_rot90_m_f32 (__inactive, __a, __b, __p);
34468 }
34469
34470 __extension__ extern __inline float16x8_t
34471 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34472 __arm_vcmulq_rot90_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34473 {
34474 return __arm_vcmulq_rot90_m_f16 (__inactive, __a, __b, __p);
34475 }
34476
/* Predicated fixed-point float -> integer conversions with __imm6
   fractional bits (counterparts of the int -> float group above).  */
34477 __extension__ extern __inline int32x4_t
34478 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34479 __arm_vcvtq_m_n (int32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p)
34480 {
34481 return __arm_vcvtq_m_n_s32_f32 (__inactive, __a, __imm6, __p);
34482 }
34483
34484 __extension__ extern __inline int16x8_t
34485 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34486 __arm_vcvtq_m_n (int16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p)
34487 {
34488 return __arm_vcvtq_m_n_s16_f16 (__inactive, __a, __imm6, __p);
34489 }
34490
34491 __extension__ extern __inline uint32x4_t
34492 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34493 __arm_vcvtq_m_n (uint32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p)
34494 {
34495 return __arm_vcvtq_m_n_u32_f32 (__inactive, __a, __imm6, __p);
34496 }
34497
34498 __extension__ extern __inline uint16x8_t
34499 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34500 __arm_vcvtq_m_n (uint16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p)
34501 {
34502 return __arm_vcvtq_m_n_u16_f16 (__inactive, __a, __imm6, __p);
34503 }
34504
/* Predicated bitwise exclusive-OR and fused multiply-accumulate
   overloads.  For veorq_m, inactive lanes take __inactive; the vfmaq_m
   forms accumulate into __a (vector third operand, or "_n" scalar), and
   vfmasq_m adds the scalar after the multiply.  */
34505 __extension__ extern __inline float32x4_t
34506 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34507 __arm_veorq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34508 {
34509 return __arm_veorq_m_f32 (__inactive, __a, __b, __p);
34510 }
34511
34512 __extension__ extern __inline float16x8_t
34513 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34514 __arm_veorq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34515 {
34516 return __arm_veorq_m_f16 (__inactive, __a, __b, __p);
34517 }
34518
34519 __extension__ extern __inline float32x4_t
34520 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34521 __arm_vfmaq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
34522 {
34523 return __arm_vfmaq_m_f32 (__a, __b, __c, __p);
34524 }
34525
34526 __extension__ extern __inline float16x8_t
34527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34528 __arm_vfmaq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
34529 {
34530 return __arm_vfmaq_m_f16 (__a, __b, __c, __p);
34531 }
34532
34533 __extension__ extern __inline float32x4_t
34534 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34535 __arm_vfmaq_m (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p)
34536 {
34537 return __arm_vfmaq_m_n_f32 (__a, __b, __c, __p);
34538 }
34539
34540 __extension__ extern __inline float16x8_t
34541 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34542 __arm_vfmaq_m (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p)
34543 {
34544 return __arm_vfmaq_m_n_f16 (__a, __b, __c, __p);
34545 }
34546
34547 __extension__ extern __inline float32x4_t
34548 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34549 __arm_vfmasq_m (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p)
34550 {
34551 return __arm_vfmasq_m_n_f32 (__a, __b, __c, __p);
34552 }
34553
34554 __extension__ extern __inline float16x8_t
34555 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34556 __arm_vfmasq_m (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p)
34557 {
34558 return __arm_vfmasq_m_n_f16 (__a, __b, __c, __p);
34559 }
34560
34561 __extension__ extern __inline float32x4_t
34562 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34563 __arm_vfmsq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
34564 {
34565 return __arm_vfmsq_m_f32 (__a, __b, __c, __p);
34566 }
34567
34568 __extension__ extern __inline float16x8_t
34569 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34570 __arm_vfmsq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
34571 {
34572 return __arm_vfmsq_m_f16 (__a, __b, __c, __p);
34573 }
34574
34575 __extension__ extern __inline float32x4_t
34576 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34577 __arm_vmaxnmq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34578 {
34579 return __arm_vmaxnmq_m_f32 (__inactive, __a, __b, __p);
34580 }
34581
34582 __extension__ extern __inline float16x8_t
34583 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34584 __arm_vmaxnmq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34585 {
34586 return __arm_vmaxnmq_m_f16 (__inactive, __a, __b, __p);
34587 }
34588
34589 __extension__ extern __inline float32x4_t
34590 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34591 __arm_vminnmq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34592 {
34593 return __arm_vminnmq_m_f32 (__inactive, __a, __b, __p);
34594 }
34595
34596 __extension__ extern __inline float16x8_t
34597 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34598 __arm_vminnmq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34599 {
34600 return __arm_vminnmq_m_f16 (__inactive, __a, __b, __p);
34601 }
34602
34603 __extension__ extern __inline float32x4_t
34604 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34605 __arm_vmulq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34606 {
34607 return __arm_vmulq_m_f32 (__inactive, __a, __b, __p);
34608 }
34609
34610 __extension__ extern __inline float16x8_t
34611 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34612 __arm_vmulq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34613 {
34614 return __arm_vmulq_m_f16 (__inactive, __a, __b, __p);
34615 }
34616
34617 __extension__ extern __inline float32x4_t
34618 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34619 __arm_vmulq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
34620 {
34621 return __arm_vmulq_m_n_f32 (__inactive, __a, __b, __p);
34622 }
34623
34624 __extension__ extern __inline float16x8_t
34625 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34626 __arm_vmulq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
34627 {
34628 return __arm_vmulq_m_n_f16 (__inactive, __a, __b, __p);
34629 }
34630
34631 __extension__ extern __inline float32x4_t
34632 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34633 __arm_vornq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34634 {
34635 return __arm_vornq_m_f32 (__inactive, __a, __b, __p);
34636 }
34637
34638 __extension__ extern __inline float16x8_t
34639 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34640 __arm_vornq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34641 {
34642 return __arm_vornq_m_f16 (__inactive, __a, __b, __p);
34643 }
34644
34645 __extension__ extern __inline float32x4_t
34646 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34647 __arm_vorrq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34648 {
34649 return __arm_vorrq_m_f32 (__inactive, __a, __b, __p);
34650 }
34651
34652 __extension__ extern __inline float16x8_t
34653 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34654 __arm_vorrq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34655 {
34656 return __arm_vorrq_m_f16 (__inactive, __a, __b, __p);
34657 }
34658
34659 __extension__ extern __inline float32x4_t
34660 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34661 __arm_vsubq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34662 {
34663 return __arm_vsubq_m_f32 (__inactive, __a, __b, __p);
34664 }
34665
34666 __extension__ extern __inline float16x8_t
34667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34668 __arm_vsubq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34669 {
34670 return __arm_vsubq_m_f16 (__inactive, __a, __b, __p);
34671 }
34672
34673 __extension__ extern __inline float32x4_t
34674 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34675 __arm_vsubq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
34676 {
34677 return __arm_vsubq_m_n_f32 (__inactive, __a, __b, __p);
34678 }
34679
34680 __extension__ extern __inline float16x8_t
34681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34682 __arm_vsubq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
34683 {
34684 return __arm_vsubq_m_n_f16 (__inactive, __a, __b, __p);
34685 }
34686
/* Polymorphic overloads of the floating-point load intrinsics.  Dispatch
   is purely on the element pointer type (float16_t vs float32_t const *).
   NOTE(review): the _z gather variants take an extra predicate __p;
   per MVE convention predicated-off lanes are presumably zeroed --
   semantics live in the type-suffixed builtins, not visible here.  */
34687 __extension__ extern __inline float32x4_t
34688 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34689 __arm_vld1q (float32_t const * __base)
34690 {
34691 return __arm_vld1q_f32 (__base);
34692 }
34693
34694 __extension__ extern __inline float16x8_t
34695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34696 __arm_vld1q (float16_t const * __base)
34697 {
34698 return __arm_vld1q_f16 (__base);
34699 }
34700
/* Half-word gathers: per-lane uint16x8_t offsets from __base.  */
34701 __extension__ extern __inline float16x8_t
34702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34703 __arm_vldrhq_gather_offset (float16_t const * __base, uint16x8_t __offset)
34704 {
34705 return __arm_vldrhq_gather_offset_f16 (__base, __offset);
34706 }
34707
34708 __extension__ extern __inline float16x8_t
34709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34710 __arm_vldrhq_gather_offset_z (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
34711 {
34712 return __arm_vldrhq_gather_offset_z_f16 (__base, __offset, __p);
34713 }
34714
34715 __extension__ extern __inline float16x8_t
34716 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34717 __arm_vldrhq_gather_shifted_offset (float16_t const * __base, uint16x8_t __offset)
34718 {
34719 return __arm_vldrhq_gather_shifted_offset_f16 (__base, __offset);
34720 }
34721
34722 __extension__ extern __inline float16x8_t
34723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34724 __arm_vldrhq_gather_shifted_offset_z (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
34725 {
34726 return __arm_vldrhq_gather_shifted_offset_z_f16 (__base, __offset, __p);
34727 }
34728
/* Word gathers: per-lane uint32x4_t offsets from __base.  */
34729 __extension__ extern __inline float32x4_t
34730 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34731 __arm_vldrwq_gather_offset (float32_t const * __base, uint32x4_t __offset)
34732 {
34733 return __arm_vldrwq_gather_offset_f32 (__base, __offset);
34734 }
34735
34736 __extension__ extern __inline float32x4_t
34737 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34738 __arm_vldrwq_gather_offset_z (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
34739 {
34740 return __arm_vldrwq_gather_offset_z_f32 (__base, __offset, __p);
34741 }
34742
34743 __extension__ extern __inline float32x4_t
34744 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34745 __arm_vldrwq_gather_shifted_offset (float32_t const * __base, uint32x4_t __offset)
34746 {
34747 return __arm_vldrwq_gather_shifted_offset_f32 (__base, __offset);
34748 }
34749
34750 __extension__ extern __inline float32x4_t
34751 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34752 __arm_vldrwq_gather_shifted_offset_z (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
34753 {
34754 return __arm_vldrwq_gather_shifted_offset_z_f32 (__base, __offset, __p);
34755 }
34756
/* Polymorphic overloads of the floating-point store/scatter intrinsics
   (plus the unpredicated vaddq overloads that sit between them in this
   generated sequence).  All are void wrappers that forward to the
   type-suffixed builtin; the _p variants carry a predicate __p.  */
34757 __extension__ extern __inline void
34758 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34759 __arm_vstrwq_p (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
34760 {
34761 __arm_vstrwq_p_f32 (__addr, __value, __p);
34762 }
34763
34764 __extension__ extern __inline void
34765 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34766 __arm_vstrwq (float32_t * __addr, float32x4_t __value)
34767 {
34768 __arm_vstrwq_f32 (__addr, __value);
34769 }
34770
34771 __extension__ extern __inline void
34772 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34773 __arm_vst1q (float32_t * __addr, float32x4_t __value)
34774 {
34775 __arm_vst1q_f32 (__addr, __value);
34776 }
34777
34778 __extension__ extern __inline void
34779 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34780 __arm_vst1q (float16_t * __addr, float16x8_t __value)
34781 {
34782 __arm_vst1q_f16 (__addr, __value);
34783 }
34784
34785 __extension__ extern __inline void
34786 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34787 __arm_vstrhq (float16_t * __addr, float16x8_t __value)
34788 {
34789 __arm_vstrhq_f16 (__addr, __value);
34790 }
34791
34792 __extension__ extern __inline void
34793 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34794 __arm_vstrhq_p (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
34795 {
34796 __arm_vstrhq_p_f16 (__addr, __value, __p);
34797 }
34798
/* Half-word scatters: per-lane uint16x8_t offsets from __base.  */
34799 __extension__ extern __inline void
34800 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34801 __arm_vstrhq_scatter_offset (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
34802 {
34803 __arm_vstrhq_scatter_offset_f16 (__base, __offset, __value);
34804 }
34805
34806 __extension__ extern __inline void
34807 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34808 __arm_vstrhq_scatter_offset_p (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
34809 {
34810 __arm_vstrhq_scatter_offset_p_f16 (__base, __offset, __value, __p);
34811 }
34812
34813 __extension__ extern __inline void
34814 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34815 __arm_vstrhq_scatter_shifted_offset (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
34816 {
34817 __arm_vstrhq_scatter_shifted_offset_f16 (__base, __offset, __value);
34818 }
34819
34820 __extension__ extern __inline void
34821 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34822 __arm_vstrhq_scatter_shifted_offset_p (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
34823 {
34824 __arm_vstrhq_scatter_shifted_offset_p_f16 (__base, __offset, __value, __p);
34825 }
34826
/* Word scatters against a vector of base addresses (__addr) plus an
   immediate __offset.  */
34827 __extension__ extern __inline void
34828 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34829 __arm_vstrwq_scatter_base (uint32x4_t __addr, const int __offset, float32x4_t __value)
34830 {
34831 __arm_vstrwq_scatter_base_f32 (__addr, __offset, __value);
34832 }
34833
34834 __extension__ extern __inline void
34835 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34836 __arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
34837 {
34838 __arm_vstrwq_scatter_base_p_f32 (__addr, __offset, __value, __p);
34839 }
34840
34841 __extension__ extern __inline void
34842 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34843 __arm_vstrwq_scatter_offset (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
34844 {
34845 __arm_vstrwq_scatter_offset_f32 (__base, __offset, __value);
34846 }
34847
34848 __extension__ extern __inline void
34849 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34850 __arm_vstrwq_scatter_offset_p (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
34851 {
34852 __arm_vstrwq_scatter_offset_p_f32 (__base, __offset, __value, __p);
34853 }
34854
34855 __extension__ extern __inline void
34856 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34857 __arm_vstrwq_scatter_shifted_offset (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
34858 {
34859 __arm_vstrwq_scatter_shifted_offset_f32 (__base, __offset, __value);
34860 }
34861
34862 __extension__ extern __inline void
34863 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34864 __arm_vstrwq_scatter_shifted_offset_p (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
34865 {
34866 __arm_vstrwq_scatter_shifted_offset_p_f32 (__base, __offset, __value, __p);
34867 }
34868
/* Unpredicated vector addition overloads.  */
34869 __extension__ extern __inline float16x8_t
34870 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34871 __arm_vaddq (float16x8_t __a, float16x8_t __b)
34872 {
34873 return __arm_vaddq_f16 (__a, __b);
34874 }
34875
34876 __extension__ extern __inline float32x4_t
34877 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34878 __arm_vaddq (float32x4_t __a, float32x4_t __b)
34879 {
34880 return __arm_vaddq_f32 (__a, __b);
34881 }
34882
/* Write-back (_wb) scatters: __addr is passed by pointer so the builtin
   can update the base-address vector.  */
34883 __extension__ extern __inline void
34884 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34885 __arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, float32x4_t __value)
34886 {
34887 __arm_vstrwq_scatter_base_wb_f32 (__addr, __offset, __value);
34888 }
34889
34890 __extension__ extern __inline void
34891 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34892 __arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
34893 {
34894 __arm_vstrwq_scatter_base_wb_p_f32 (__addr, __offset, __value, __p);
34895 }
34896
/* Polymorphic overloads of the "don't-care"-predicated (_x) floating-point
   arithmetic intrinsics.  Unlike the _m forms above there is no __inactive
   operand; each overload dispatches on the argument types to the matching
   _x_f16/_x_f32 (or _x_n_) intrinsic.  NOTE(review): per MVE convention
   lanes where __p is 0 presumably hold unspecified values -- that contract
   lives in the builtins, not visible here.  */
34897 __extension__ extern __inline float16x8_t
34898 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34899 __arm_vminnmq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34900 {
34901 return __arm_vminnmq_x_f16 (__a, __b, __p);
34902 }
34903
34904 __extension__ extern __inline float32x4_t
34905 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34906 __arm_vminnmq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34907 {
34908 return __arm_vminnmq_x_f32 (__a, __b, __p);
34909 }
34910
34911 __extension__ extern __inline float16x8_t
34912 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34913 __arm_vmaxnmq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34914 {
34915 return __arm_vmaxnmq_x_f16 (__a, __b, __p);
34916 }
34917
34918 __extension__ extern __inline float32x4_t
34919 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34920 __arm_vmaxnmq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34921 {
34922 return __arm_vmaxnmq_x_f32 (__a, __b, __p);
34923 }
34924
34925 __extension__ extern __inline float16x8_t
34926 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34927 __arm_vabdq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34928 {
34929 return __arm_vabdq_x_f16 (__a, __b, __p);
34930 }
34931
34932 __extension__ extern __inline float32x4_t
34933 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34934 __arm_vabdq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34935 {
34936 return __arm_vabdq_x_f32 (__a, __b, __p);
34937 }
34938
34939 __extension__ extern __inline float16x8_t
34940 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34941 __arm_vabsq_x (float16x8_t __a, mve_pred16_t __p)
34942 {
34943 return __arm_vabsq_x_f16 (__a, __p);
34944 }
34945
34946 __extension__ extern __inline float32x4_t
34947 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34948 __arm_vabsq_x (float32x4_t __a, mve_pred16_t __p)
34949 {
34950 return __arm_vabsq_x_f32 (__a, __p);
34951 }
34952
/* vaddq_x: vector+vector and vector+scalar (_n_) overloads.  */
34953 __extension__ extern __inline float16x8_t
34954 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34955 __arm_vaddq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34956 {
34957 return __arm_vaddq_x_f16 (__a, __b, __p);
34958 }
34959
34960 __extension__ extern __inline float32x4_t
34961 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34962 __arm_vaddq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
34963 {
34964 return __arm_vaddq_x_f32 (__a, __b, __p);
34965 }
34966
34967 __extension__ extern __inline float16x8_t
34968 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34969 __arm_vaddq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
34970 {
34971 return __arm_vaddq_x_n_f16 (__a, __b, __p);
34972 }
34973
34974 __extension__ extern __inline float32x4_t
34975 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34976 __arm_vaddq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
34977 {
34978 return __arm_vaddq_x_n_f32 (__a, __b, __p);
34979 }
34980
34981 __extension__ extern __inline float16x8_t
34982 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34983 __arm_vnegq_x (float16x8_t __a, mve_pred16_t __p)
34984 {
34985 return __arm_vnegq_x_f16 (__a, __p);
34986 }
34987
34988 __extension__ extern __inline float32x4_t
34989 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34990 __arm_vnegq_x (float32x4_t __a, mve_pred16_t __p)
34991 {
34992 return __arm_vnegq_x_f32 (__a, __p);
34993 }
34994
/* vmulq_x: vector*vector and vector*scalar (_n_) overloads.  */
34995 __extension__ extern __inline float16x8_t
34996 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34997 __arm_vmulq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
34998 {
34999 return __arm_vmulq_x_f16 (__a, __b, __p);
35000 }
35001
35002 __extension__ extern __inline float32x4_t
35003 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35004 __arm_vmulq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35005 {
35006 return __arm_vmulq_x_f32 (__a, __b, __p);
35007 }
35008
35009 __extension__ extern __inline float16x8_t
35010 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35011 __arm_vmulq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
35012 {
35013 return __arm_vmulq_x_n_f16 (__a, __b, __p);
35014 }
35015
35016 __extension__ extern __inline float32x4_t
35017 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35018 __arm_vmulq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
35019 {
35020 return __arm_vmulq_x_n_f32 (__a, __b, __p);
35021 }
35022
/* vsubq_x: vector-vector and vector-scalar (_n_) overloads.  */
35023 __extension__ extern __inline float16x8_t
35024 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35025 __arm_vsubq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35026 {
35027 return __arm_vsubq_x_f16 (__a, __b, __p);
35028 }
35029
35030 __extension__ extern __inline float32x4_t
35031 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35032 __arm_vsubq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35033 {
35034 return __arm_vsubq_x_f32 (__a, __b, __p);
35035 }
35036
35037 __extension__ extern __inline float16x8_t
35038 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35039 __arm_vsubq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
35040 {
35041 return __arm_vsubq_x_n_f16 (__a, __b, __p);
35042 }
35043
35044 __extension__ extern __inline float32x4_t
35045 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35046 __arm_vsubq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
35047 {
35048 return __arm_vsubq_x_n_f32 (__a, __b, __p);
35049 }
35050
/* Polymorphic overloads of the _x-predicated complex-arithmetic
   (vcaddq/vcmulq with rotation suffixes) and float<->integer conversion
   intrinsics.  Each overload dispatches on argument types only; vcvtq_x
   selects the source integer type, vcvtq_x_n additionally takes an
   immediate __imm6.  */
35051 __extension__ extern __inline float16x8_t
35052 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35053 __arm_vcaddq_rot90_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35054 {
35055 return __arm_vcaddq_rot90_x_f16 (__a, __b, __p);
35056 }
35057
35058 __extension__ extern __inline float32x4_t
35059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35060 __arm_vcaddq_rot90_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35061 {
35062 return __arm_vcaddq_rot90_x_f32 (__a, __b, __p);
35063 }
35064
35065 __extension__ extern __inline float16x8_t
35066 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35067 __arm_vcaddq_rot270_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35068 {
35069 return __arm_vcaddq_rot270_x_f16 (__a, __b, __p);
35070 }
35071
35072 __extension__ extern __inline float32x4_t
35073 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35074 __arm_vcaddq_rot270_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35075 {
35076 return __arm_vcaddq_rot270_x_f32 (__a, __b, __p);
35077 }
35078
35079 __extension__ extern __inline float16x8_t
35080 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35081 __arm_vcmulq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35082 {
35083 return __arm_vcmulq_x_f16 (__a, __b, __p);
35084 }
35085
35086 __extension__ extern __inline float32x4_t
35087 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35088 __arm_vcmulq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35089 {
35090 return __arm_vcmulq_x_f32 (__a, __b, __p);
35091 }
35092
35093 __extension__ extern __inline float16x8_t
35094 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35095 __arm_vcmulq_rot90_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35096 {
35097 return __arm_vcmulq_rot90_x_f16 (__a, __b, __p);
35098 }
35099
35100 __extension__ extern __inline float32x4_t
35101 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35102 __arm_vcmulq_rot90_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35103 {
35104 return __arm_vcmulq_rot90_x_f32 (__a, __b, __p);
35105 }
35106
35107 __extension__ extern __inline float16x8_t
35108 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35109 __arm_vcmulq_rot180_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35110 {
35111 return __arm_vcmulq_rot180_x_f16 (__a, __b, __p);
35112 }
35113
35114 __extension__ extern __inline float32x4_t
35115 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35116 __arm_vcmulq_rot180_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35117 {
35118 return __arm_vcmulq_rot180_x_f32 (__a, __b, __p);
35119 }
35120
35121 __extension__ extern __inline float16x8_t
35122 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35123 __arm_vcmulq_rot270_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
35124 {
35125 return __arm_vcmulq_rot270_x_f16 (__a, __b, __p);
35126 }
35127
35128 __extension__ extern __inline float32x4_t
35129 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35130 __arm_vcmulq_rot270_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
35131 {
35132 return __arm_vcmulq_rot270_x_f32 (__a, __b, __p);
35133 }
35134
/* vcvtq_x: integer->float conversions, dispatched on the source type.  */
35135 __extension__ extern __inline float16x8_t
35136 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35137 __arm_vcvtq_x (uint16x8_t __a, mve_pred16_t __p)
35138 {
35139 return __arm_vcvtq_x_f16_u16 (__a, __p);
35140 }
35141
35142 __extension__ extern __inline float16x8_t
35143 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35144 __arm_vcvtq_x (int16x8_t __a, mve_pred16_t __p)
35145 {
35146 return __arm_vcvtq_x_f16_s16 (__a, __p);
35147 }
35148
35149 __extension__ extern __inline float32x4_t
35150 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35151 __arm_vcvtq_x (int32x4_t __a, mve_pred16_t __p)
35152 {
35153 return __arm_vcvtq_x_f32_s32 (__a, __p);
35154 }
35155
35156 __extension__ extern __inline float32x4_t
35157 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35158 __arm_vcvtq_x (uint32x4_t __a, mve_pred16_t __p)
35159 {
35160 return __arm_vcvtq_x_f32_u32 (__a, __p);
35161 }
35162
/* vcvtq_x_n: as above but with an immediate __imm6 operand.  */
35163 __extension__ extern __inline float16x8_t
35164 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35165 __arm_vcvtq_x_n (int16x8_t __a, const int __imm6, mve_pred16_t __p)
35166 {
35167 return __arm_vcvtq_x_n_f16_s16 (__a, __imm6, __p);
35168 }
35169
35170 __extension__ extern __inline float16x8_t
35171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35172 __arm_vcvtq_x_n (uint16x8_t __a, const int __imm6, mve_pred16_t __p)
35173 {
35174 return __arm_vcvtq_x_n_f16_u16 (__a, __imm6, __p);
35175 }
35176
35177 __extension__ extern __inline float32x4_t
35178 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35179 __arm_vcvtq_x_n (int32x4_t __a, const int __imm6, mve_pred16_t __p)
35180 {
35181 return __arm_vcvtq_x_n_f32_s32 (__a, __imm6, __p);
35182 }
35183
35184 __extension__ extern __inline float32x4_t
35185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35186 __arm_vcvtq_x_n (uint32x4_t __a, const int __imm6, mve_pred16_t __p)
35187 {
35188 return __arm_vcvtq_x_n_f32_u32 (__a, __imm6, __p);
35189 }
35190
35191 __extension__ extern __inline float16x8_t
35192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35193 __arm_vrndq_x (float16x8_t __a, mve_pred16_t __p)
35194 {
35195 return __arm_vrndq_x_f16 (__a, __p);
35196 }
35197
35198 __extension__ extern __inline float32x4_t
35199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35200 __arm_vrndq_x (float32x4_t __a, mve_pred16_t __p)
35201 {
35202 return __arm_vrndq_x_f32 (__a, __p);
35203 }
35204
35205 __extension__ extern __inline float16x8_t
35206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35207 __arm_vrndnq_x (float16x8_t __a, mve_pred16_t __p)
35208 {
35209 return __arm_vrndnq_x_f16 (__a, __p);
35210 }
35211
35212 __extension__ extern __inline float32x4_t
35213 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35214 __arm_vrndnq_x (float32x4_t __a, mve_pred16_t __p)
35215 {
35216 return __arm_vrndnq_x_f32 (__a, __p);
35217 }
35218
35219 __extension__ extern __inline float16x8_t
35220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35221 __arm_vrndmq_x (float16x8_t __a, mve_pred16_t __p)
35222 {
35223 return __arm_vrndmq_x_f16 (__a, __p);
35224 }
35225
35226 __extension__ extern __inline float32x4_t
35227 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35228 __arm_vrndmq_x (float32x4_t __a, mve_pred16_t __p)
35229 {
35230 return __arm_vrndmq_x_f32 (__a, __p);
35231 }
35232
35233 __extension__ extern __inline float16x8_t
35234 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35235 __arm_vrndpq_x (float16x8_t __a, mve_pred16_t __p)
35236 {
35237 return __arm_vrndpq_x_f16 (__a, __p);
35238 }
35239
35240 __extension__ extern __inline float32x4_t
35241 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35242 __arm_vrndpq_x (float32x4_t __a, mve_pred16_t __p)
35243 {
35244 return __arm_vrndpq_x_f32 (__a, __p);
35245 }
35246
35247 __extension__ extern __inline float16x8_t
35248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35249 __arm_vrndaq_x (float16x8_t __a, mve_pred16_t __p)
35250 {
35251 return __arm_vrndaq_x_f16 (__a, __p);
35252 }
35253
35254 __extension__ extern __inline float32x4_t
35255 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35256 __arm_vrndaq_x (float32x4_t __a, mve_pred16_t __p)
35257 {
35258 return __arm_vrndaq_x_f32 (__a, __p);
35259 }
35260
35261 __extension__ extern __inline float16x8_t
35262 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35263 __arm_vrndxq_x (float16x8_t __a, mve_pred16_t __p)
35264 {
35265 return __arm_vrndxq_x_f16 (__a, __p);
35266 }
35267
/* Predicated (_x) polymorphic overloads.  Each wrapper simply forwards
   to the type-suffixed MVE intrinsic; lanes whose bit in __p is clear
   are left undefined (the "_x" contract).  */

/* Round to integral, using the current rounding mode.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrndxq_x (float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vrndxq_x_f32 (__a, __p);
}

/* Bitwise AND (operates on the bit patterns of the float vectors).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vandq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vandq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vandq_x_f32 (__a, __b, __p);
}

/* Bitwise clear: __a AND NOT __b.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vbicq_x_f32 (__a, __b, __p);
}

/* Bit-reverse shift right and insert (scalar shift amount in __b).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x (float16x8_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_x_n_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbrsrq_x (float32x4_t __a, int32_t __b, mve_pred16_t __p)
{
  return __arm_vbrsrq_x_n_f32 (__a, __b, __p);
}

/* Bitwise exclusive OR.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_veorq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_veorq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_veorq_x_f32 (__a, __b, __p);
}
35330
/* More predicated (_x) float overloads: OR-NOT, OR, and element
   reversal.  Untouched predicate lanes are undefined.  */

/* Bitwise OR complement: __a OR NOT __b.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vornq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vornq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vornq_x_f32 (__a, __b, __p);
}

/* Bitwise inclusive OR.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __arm_vorrq_x_f16 (__a, __b, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vorrq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __arm_vorrq_x_f32 (__a, __b, __p);
}

/* Reverse halfwords within each 32-bit container (f16 only).  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev32q_x (float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrev32q_x_f16 (__a, __p);
}

/* Reverse elements within each 64-bit container.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x (float16x8_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_x_f16 (__a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrev64q_x (float32x4_t __a, mve_pred16_t __p)
{
  return __arm_vrev64q_x_f32 (__a, __p);
}
35379
/* Polymorphic load/store overloads for float16/float32:
   vld4q/vld2q de-interleave 4 or 2 vectors from memory, vst2q
   interleaves 2 vectors back, and vld1q_z / vst1q_p are the
   predicated contiguous load (zeroing inactive lanes) and store
   (skipping inactive lanes).  */

__extension__ extern __inline float16x8x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q (float16_t const * __addr)
{
  return __arm_vld4q_f16 (__addr);
}

__extension__ extern __inline float16x8x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q (float16_t const * __addr)
{
  return __arm_vld2q_f16 (__addr);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_z (float16_t const *__base, mve_pred16_t __p)
{
  return __arm_vld1q_z_f16 (__base, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q (float16_t * __addr, float16x8x2_t __value)
{
  __arm_vst2q_f16 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_p (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
{
  __arm_vst1q_p_f16 (__addr, __value, __p);
}

__extension__ extern __inline float32x4x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld4q (float32_t const * __addr)
{
  return __arm_vld4q_f32 (__addr);
}

__extension__ extern __inline float32x4x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld2q (float32_t const * __addr)
{
  return __arm_vld2q_f32 (__addr);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vld1q_z (float32_t const *__base, mve_pred16_t __p)
{
  return __arm_vld1q_z_f32 (__base, __p);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst2q (float32_t * __addr, float32x4x2_t __value)
{
  __arm_vst2q_f32 (__addr, __value);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vst1q_p (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
{
  __arm_vst1q_p_f32 (__addr, __value, __p);
}
35449
/* Lane accessors: vsetq_lane returns a copy of __b with lane __idx
   replaced by __a; vgetq_lane extracts lane __idx as a scalar.
   __idx must be a compile-time constant in range for the vector.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane (float16_t __a, float16x8_t __b, const int __idx)
{
  return __arm_vsetq_lane_f16 (__a, __b, __idx);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vsetq_lane (float32_t __a, float32x4_t __b, const int __idx)
{
  return __arm_vsetq_lane_f32 (__a, __b, __idx);
}

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane (float16x8_t __a, const int __idx)
{
  return __arm_vgetq_lane_f16 (__a, __idx);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vgetq_lane (float32x4_t __a, const int __idx)
{
  return __arm_vgetq_lane_f32 (__a, __idx);
}
35477 #endif /* MVE Floating point. */
35478
35479 #else
/* Type tags produced by __ARM_mve_typeid and consumed by the
   _Generic-based polymorphic intrinsic macros below.  Each macro
   selects an implementation by forming the array type
   int (*)[tag0][tag1]... from the operand tags.  __ARM_mve_type_fp_n
   and __ARM_mve_type_int_n tag scalar (immediate) operands.  */
enum {
  __ARM_mve_type_fp_n = 1,
  __ARM_mve_type_int_n,
  __ARM_mve_type_float16_t_ptr,
  __ARM_mve_type_float16x8_t,
  __ARM_mve_type_float16x8x2_t,
  __ARM_mve_type_float16x8x4_t,
  __ARM_mve_type_float32_t_ptr,
  __ARM_mve_type_float32x4_t,
  __ARM_mve_type_float32x4x2_t,
  __ARM_mve_type_float32x4x4_t,
  __ARM_mve_type_int16_t_ptr,
  __ARM_mve_type_int16x8_t,
  __ARM_mve_type_int16x8x2_t,
  __ARM_mve_type_int16x8x4_t,
  __ARM_mve_type_int32_t_ptr,
  __ARM_mve_type_int32x4_t,
  __ARM_mve_type_int32x4x2_t,
  __ARM_mve_type_int32x4x4_t,
  __ARM_mve_type_int64_t_ptr,
  __ARM_mve_type_int64x2_t,
  __ARM_mve_type_int8_t_ptr,
  __ARM_mve_type_int8x16_t,
  __ARM_mve_type_int8x16x2_t,
  __ARM_mve_type_int8x16x4_t,
  __ARM_mve_type_uint16_t_ptr,
  __ARM_mve_type_uint16x8_t,
  __ARM_mve_type_uint16x8x2_t,
  __ARM_mve_type_uint16x8x4_t,
  __ARM_mve_type_uint32_t_ptr,
  __ARM_mve_type_uint32x4_t,
  __ARM_mve_type_uint32x4x2_t,
  __ARM_mve_type_uint32x4x4_t,
  __ARM_mve_type_uint64_t_ptr,
  __ARM_mve_type_uint64x2_t,
  __ARM_mve_type_uint8_t_ptr,
  __ARM_mve_type_uint8x16_t,
  __ARM_mve_type_uint8x16x2_t,
  __ARM_mve_type_uint8x16x4_t,
  __ARM_mve_unsupported_type
};
35521
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */
/* Map an expression's type to its __ARM_mve_type_* tag (FP build).
   Scalars map to the generic _n tags; the nested default _Generic
   catches plain C arithmetic types after integer promotion.  Unknown
   types yield __ARM_mve_unsupported_type, which has no _Generic
   association in the dispatch macros and so produces a compile-time
   error.  */
#define __ARM_mve_typeid(x) _Generic(x, \
  float16_t: __ARM_mve_type_fp_n, \
  float16_t *: __ARM_mve_type_float16_t_ptr, \
  float16_t const *: __ARM_mve_type_float16_t_ptr, \
  float16x8_t: __ARM_mve_type_float16x8_t, \
  float16x8x2_t: __ARM_mve_type_float16x8x2_t, \
  float16x8x4_t: __ARM_mve_type_float16x8x4_t, \
  float32_t: __ARM_mve_type_fp_n, \
  float32_t *: __ARM_mve_type_float32_t_ptr, \
  float32_t const *: __ARM_mve_type_float32_t_ptr, \
  float32x4_t: __ARM_mve_type_float32x4_t, \
  float32x4x2_t: __ARM_mve_type_float32x4x2_t, \
  float32x4x4_t: __ARM_mve_type_float32x4x4_t, \
  int16_t: __ARM_mve_type_int_n, \
  int16_t *: __ARM_mve_type_int16_t_ptr, \
  int16_t const *: __ARM_mve_type_int16_t_ptr, \
  int16x8_t: __ARM_mve_type_int16x8_t, \
  int16x8x2_t: __ARM_mve_type_int16x8x2_t, \
  int16x8x4_t: __ARM_mve_type_int16x8x4_t, \
  int32_t: __ARM_mve_type_int_n, \
  int32_t *: __ARM_mve_type_int32_t_ptr, \
  int32_t const *: __ARM_mve_type_int32_t_ptr, \
  int32x4_t: __ARM_mve_type_int32x4_t, \
  int32x4x2_t: __ARM_mve_type_int32x4x2_t, \
  int32x4x4_t: __ARM_mve_type_int32x4x4_t, \
  int64_t: __ARM_mve_type_int_n, \
  int64_t *: __ARM_mve_type_int64_t_ptr, \
  int64_t const *: __ARM_mve_type_int64_t_ptr, \
  int64x2_t: __ARM_mve_type_int64x2_t, \
  int8_t: __ARM_mve_type_int_n, \
  int8_t *: __ARM_mve_type_int8_t_ptr, \
  int8_t const *: __ARM_mve_type_int8_t_ptr, \
  int8x16_t: __ARM_mve_type_int8x16_t, \
  int8x16x2_t: __ARM_mve_type_int8x16x2_t, \
  int8x16x4_t: __ARM_mve_type_int8x16x4_t, \
  uint16_t: __ARM_mve_type_int_n, \
  uint16_t *: __ARM_mve_type_uint16_t_ptr, \
  uint16_t const *: __ARM_mve_type_uint16_t_ptr, \
  uint16x8_t: __ARM_mve_type_uint16x8_t, \
  uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \
  uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \
  uint32_t: __ARM_mve_type_int_n, \
  uint32_t *: __ARM_mve_type_uint32_t_ptr, \
  uint32_t const *: __ARM_mve_type_uint32_t_ptr, \
  uint32x4_t: __ARM_mve_type_uint32x4_t, \
  uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \
  uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \
  uint64_t: __ARM_mve_type_int_n, \
  uint64_t *: __ARM_mve_type_uint64_t_ptr, \
  uint64_t const *: __ARM_mve_type_uint64_t_ptr, \
  uint64x2_t: __ARM_mve_type_uint64x2_t, \
  uint8_t: __ARM_mve_type_int_n, \
  uint8_t *: __ARM_mve_type_uint8_t_ptr, \
  uint8_t const *: __ARM_mve_type_uint8_t_ptr, \
  uint8x16_t: __ARM_mve_type_uint8x16_t, \
  uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \
  uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \
  default: _Generic(x, \
	signed char: __ARM_mve_type_int_n, \
	short: __ARM_mve_type_int_n, \
	int: __ARM_mve_type_int_n, \
	long: __ARM_mve_type_int_n, \
	double: __ARM_mve_type_fp_n, \
	long long: __ARM_mve_type_int_n, \
	unsigned char: __ARM_mve_type_int_n, \
	unsigned short: __ARM_mve_type_int_n, \
	unsigned int: __ARM_mve_type_int_n, \
	unsigned long: __ARM_mve_type_int_n, \
	unsigned long long: __ARM_mve_type_int_n, \
	default: __ARM_mve_unsupported_type))
#else
/* Integer-only build of __ARM_mve_typeid: identical to the FP variant
   above minus the float16/float32 associations (and `double' in the
   fallback, which would be unusable without MVE-FP).  */
#define __ARM_mve_typeid(x) _Generic(x, \
  int16_t: __ARM_mve_type_int_n, \
  int16_t *: __ARM_mve_type_int16_t_ptr, \
  int16_t const *: __ARM_mve_type_int16_t_ptr, \
  int16x8_t: __ARM_mve_type_int16x8_t, \
  int16x8x2_t: __ARM_mve_type_int16x8x2_t, \
  int16x8x4_t: __ARM_mve_type_int16x8x4_t, \
  int32_t: __ARM_mve_type_int_n, \
  int32_t *: __ARM_mve_type_int32_t_ptr, \
  int32_t const *: __ARM_mve_type_int32_t_ptr, \
  int32x4_t: __ARM_mve_type_int32x4_t, \
  int32x4x2_t: __ARM_mve_type_int32x4x2_t, \
  int32x4x4_t: __ARM_mve_type_int32x4x4_t, \
  int64_t: __ARM_mve_type_int_n, \
  int64_t *: __ARM_mve_type_int64_t_ptr, \
  int64_t const *: __ARM_mve_type_int64_t_ptr, \
  int64x2_t: __ARM_mve_type_int64x2_t, \
  int8_t: __ARM_mve_type_int_n, \
  int8_t *: __ARM_mve_type_int8_t_ptr, \
  int8_t const *: __ARM_mve_type_int8_t_ptr, \
  int8x16_t: __ARM_mve_type_int8x16_t, \
  int8x16x2_t: __ARM_mve_type_int8x16x2_t, \
  int8x16x4_t: __ARM_mve_type_int8x16x4_t, \
  uint16_t: __ARM_mve_type_int_n, \
  uint16_t *: __ARM_mve_type_uint16_t_ptr, \
  uint16_t const *: __ARM_mve_type_uint16_t_ptr, \
  uint16x8_t: __ARM_mve_type_uint16x8_t, \
  uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \
  uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \
  uint32_t: __ARM_mve_type_int_n, \
  uint32_t *: __ARM_mve_type_uint32_t_ptr, \
  uint32_t const *: __ARM_mve_type_uint32_t_ptr, \
  uint32x4_t: __ARM_mve_type_uint32x4_t, \
  uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \
  uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \
  uint64_t: __ARM_mve_type_int_n, \
  uint64_t *: __ARM_mve_type_uint64_t_ptr, \
  uint64_t const *: __ARM_mve_type_uint64_t_ptr, \
  uint64x2_t: __ARM_mve_type_uint64x2_t, \
  uint8_t: __ARM_mve_type_int_n, \
  uint8_t *: __ARM_mve_type_uint8_t_ptr, \
  uint8_t const *: __ARM_mve_type_uint8_t_ptr, \
  uint8x16_t: __ARM_mve_type_uint8x16_t, \
  uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \
  uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \
  default: _Generic(x, \
	signed char: __ARM_mve_type_int_n, \
	short: __ARM_mve_type_int_n, \
	int: __ARM_mve_type_int_n, \
	long: __ARM_mve_type_int_n, \
	long long: __ARM_mve_type_int_n, \
	unsigned char: __ARM_mve_type_int_n, \
	unsigned short: __ARM_mve_type_int_n, \
	unsigned int: __ARM_mve_type_int_n, \
	unsigned long: __ARM_mve_type_int_n, \
	unsigned long long: __ARM_mve_type_int_n, \
	default: __ARM_mve_unsupported_type))
#endif /* MVE Floating point.  */
35652
/* Poison object used by the coercion macros: selecting a mismatched
   overload dereferences __ARM_undef through an incompatible type,
   guaranteeing a diagnostic rather than silent miscompilation.  */
extern void *__ARM_undef;
/* Pass PARAM through unchanged iff it has exactly TYPE.  */
#define __ARM_mve_coerce(param, type) \
    _Generic(param, type: param, default: *(type *)__ARM_undef)
/* As above, but also accept a const-qualified TYPE.  */
#define __ARM_mve_coerce1(param, type) \
    _Generic(param, type: param, const type: param, default: *(type *)__ARM_undef)
/* As above, but additionally accept float16_t/float32_t scalars,
   letting FP immediates convert to the target scalar TYPE.  */
#define __ARM_mve_coerce2(param, type) \
    _Generic(param, type: param, float16_t: param, float32_t: param, default: *(type *)__ARM_undef)
35660
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */

/* Polymorphic vst4q: p0/p1 are copied into the __p0/__p1 temporaries
   first so each macro argument is evaluated exactly once, then the
   pair of typeids selects the matching type-suffixed store.  */
#define __arm_vst4q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)), \
  int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x4_t]: __arm_vst4q_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x4_t]: __arm_vst4q_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x4_t)));})
35674
/* Polymorphic rounding macros (float only): x = current mode,
   (none) = toward zero, p = toward +inf, n = to nearest even,
   m = toward -inf, a = to nearest, ties away from zero.  */
#define __arm_vrndxq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vrndq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vrndpq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vrndnq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vrndmq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

#define __arm_vrndaq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
35704
/* Reverse elements within each 64-bit container, all element types.  */
#define __arm_vrev64q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

/* Negate each element (signed integer and float vectors only).  */
#define __arm_vnegq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

/* NOTE(review): these associations dispatch on a *vector*-typed p0,
   while vdupq_n conventionally takes a scalar — looks inherited from
   an older revision; verify against current ACLE before relying on
   this overload.  */
#define __arm_vdupq_n(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vdupq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vdupq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})

/* Absolute value of each element (signed integer and float only).  */
#define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
35736
/* Reverse elements within each 32-bit container (8/16-bit elements).  */
#define __arm_vrev32q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})

/* Widen the bottom (even-indexed) f16 elements to f32.  */
#define __arm_vcvtbq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vcvtbq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})

/* Widen the top (odd-indexed) f16 elements to f32.  */
#define __arm_vcvttq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vcvttq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})

/* Reverse bytes within each 16-bit container (8-bit elements only).  */
#define __arm_vrev16q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)));})

/* Saturating absolute value (signed only).  */
#define __arm_vqabsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
35763
/* Saturating negate (signed only).  */
#define __arm_vqnegq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})

/* Bitwise NOT of every element.  */
#define __arm_vmvnq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})

/* Widen the bottom (even-indexed) elements to double width.  */
#define __arm_vmovlbq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})

/* Widen the top (odd-indexed) elements to double width.  */
#define __arm_vmovltq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})

/* Count leading zeros per element.  */
#define __arm_vclzq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})

/* Count leading sign bits per element (signed only).  */
#define __arm_vclsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
35807
/* Integer -> float conversion; the result element size matches the
   source element size.  */
#define __arm_vcvtq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})

/* Element-wise shift by a signed per-element shift vector; negative
   shift counts shift right.  */
#define __arm_vshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})

/* Shift right by immediate p1; p1 is used unexpanded because it must
   remain an integer constant expression for the _n intrinsics.  */
#define __arm_vshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})

/* Fixed-point integer -> float conversion with p1 fraction bits
   (p1 must be an integer constant expression).  */
#define __arm_vcvtq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_n_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_n_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_n_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_n_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
35840
/* Bitwise inclusive OR, all vector types.  */
#define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

/* Absolute difference of corresponding elements, all vector types.  */
#define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35864
/* Element-wise addition: vector+vector for every element type, plus
   vector+scalar (_n) forms.  FP scalars go through __ARM_mve_coerce2
   so float16_t/float32_t immediates are accepted.

   Fix: the f16/f32 vector+vector branches previously passed the raw
   macro arguments p0/p1 instead of the single-evaluation temporaries
   __p0/__p1, so e.g. vaddq (x++, y) evaluated x++ twice when both
   operands were float vectors.  They now use __p0/__p1, consistent
   with every other branch.  */
#define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
35884
/* Polymorphic vandq (bitwise AND): _Generic dispatch on both vector operand
   types; arguments are evaluated once into __p0/__p1.  */
#define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35896
/* Polymorphic vbicq (bit clear): a vector with a scalar immediate second
   operand selects the _n variant (__ARM_mve_coerce1 admits the immediate);
   two vectors select the element-wise variant.  */
#define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1 (__p1, int)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1 (__p1, int)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1 (__p1, int)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1 (__p1, int)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35912
/* Polymorphic vornq (OR with complement of second operand): _Generic
   dispatch on both vector operand types.  */
#define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35924
/* Polymorphic vmulq: scalar second operand selects the _n (vector-by-scalar)
   variant, vector second operand the element-wise variant; float scalars go
   through __ARM_mve_coerce2 as double.  */
#define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35944
/* Polymorphic vcaddq_rot270 (complex add with 270-degree rotation):
   _Generic dispatch on both vector operand types.  */
#define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35956
/* Polymorphic vcmpeqq (compare equal): scalar second operand selects the
   _n variant, vector second operand the element-wise variant; result is a
   predicate produced by the selected intrinsic.  */
#define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35976
/* Polymorphic vcaddq_rot90 (complex add with 90-degree rotation):
   _Generic dispatch on both vector operand types.  */
#define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
35988
/* Polymorphic predicated vcmpeqq_m: p2 is the predicate mask, passed through
   unchanged (it appears once in the single selected branch, so it needs no
   temporary); dispatch mirrors __arm_vcmpeqq with _n and vector forms.  */
#define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2));})
36008
/* Polymorphic vcmpgtq (compare greater-than): signed and float variants
   only, in vector and _n (scalar) forms.  */
#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
36022
/* Polymorphic vcmpleq (compare less-or-equal): signed and float variants
   only, in vector and _n (scalar) forms.  */
#define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpleq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpleq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
36036
/* Polymorphic vcmpltq (compare less-than): signed and float variants only,
   in vector and _n (scalar) forms.  */
#define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
36050
/* Polymorphic vcmpneq (compare not-equal): scalar second operand selects
   the _n variant, vector second operand the element-wise variant.  */
#define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpneq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpneq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36070
/* Polymorphic vcmulq (complex multiply): float-only dispatch.  */
#define __arm_vcmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36076
/* Polymorphic vcmulq_rot180 (complex multiply, 180-degree rotation):
   float-only dispatch.  */
#define __arm_vcmulq_rot180(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36082
/* Polymorphic vcmulq_rot270 (complex multiply, 270-degree rotation):
   float-only dispatch.  */
#define __arm_vcmulq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36088
/* Polymorphic vcmulq_rot90 (complex multiply, 90-degree rotation):
   float-only dispatch.  */
#define __arm_vcmulq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36094
/* Polymorphic veorq (bitwise XOR): _Generic dispatch on both vector
   operand types.  */
#define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36106
/* Polymorphic vmaxnmaq (float absolute maximum, NaN-propagating per
   maxNum semantics): float-only dispatch.  */
#define __arm_vmaxnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36112
/* Polymorphic vmaxnmavq: scalar (fp) first operand, float vector second;
   across-vector absolute-maximum reduction.  */
#define __arm_vmaxnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
36118
/* Polymorphic vmaxnmq (float element-wise maximum): float-only dispatch.  */
#define __arm_vmaxnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36124
/* Polymorphic vmaxnmvq: scalar (fp) first operand, float vector second;
   across-vector maximum reduction.  */
#define __arm_vmaxnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
36130
/* NOTE(review): this is a byte-identical redefinition of __arm_vmaxnmvq
   (defined immediately above).  Identical macro redefinition is legal
   (C11 6.10.3p2) so it is harmless, but the duplicate should be removed
   in a cleanup pass.  */
#define __arm_vmaxnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
36136
/* Polymorphic vminnmaq (float absolute minimum): float-only dispatch.  */
#define __arm_vminnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36142
/* Overloaded vminnmavq: scalar first operand (fp_n, coerced via double),
   vector second; _Generic selects the f16 or f32 intrinsic.  */
36143 #define __arm_vminnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36144   __typeof(p1) __p1 = (p1); \
36145   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36146   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
36147   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
36148
/* Overloaded vbrsrq: _Generic dispatch on the vector argument only; p1 is
   passed through unevaluated-once (no __typeof copy) to the _n intrinsic.  */
36149 #define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36150   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36151   int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36152   int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36153   int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
36154   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36155   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
36156   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \
36157   int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), p1), \
36158   int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), p1));})
36159
/* Overloaded vminnmq: _Generic dispatch on both vector arguments' MVE type
   ids to the f16 or f32 intrinsic.  */
36160 #define __arm_vminnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36161   __typeof(p1) __p1 = (p1); \
36162   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36163   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36164   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36165
/* Overloaded vsubq: covers vector+scalar (_n variants, int_n/fp_n second
   operand) and vector+vector forms for s8/s16/s32, u8/u16/u32, f16/f32.  */
36166 #define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36167   __typeof(p1) __p1 = (p1); \
36168   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36169   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
36170   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)), \
36171   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36172   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36173   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36174   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
36175   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
36176   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
36177   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36178   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36179   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36180   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36181   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36182   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
36183   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
36184   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
36185
/* Overloaded vminnmvq: scalar first operand (fp_n), vector second; _Generic
   selects the f16 or f32 intrinsic.  */
36186 #define __arm_vminnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36187   __typeof(p1) __p1 = (p1); \
36188   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36189   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
36190   int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
36191
/* Overloaded vshlq_r: dispatch on the vector argument; p1 (shift count in a
   register) is forwarded untouched to the type-suffixed intrinsic.  */
36192 #define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36193   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36194   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36195   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36196   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
36197   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36198   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
36199   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
36200
/* Overloaded vshlq_n: dispatch on the vector argument; p1 (immediate) is
   forwarded untouched so it stays a compile-time constant expression.  */
36201 #define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36202   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36203   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36204   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36205   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
36206   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36207   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
36208   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
36209
/* Overloaded vshlltq: narrow-element vectors only (8/16-bit lanes), since the
   result widens; dispatch on the vector argument, immediate forwarded.  */
36210 #define __arm_vshlltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36211   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36212   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36213   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36214   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36215   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
36216
/* Overloaded vshllbq: same dispatch shape as vshlltq — 8/16-bit lane vectors
   only, immediate forwarded to the _n intrinsic.  */
36217 #define __arm_vshllbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36218   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36219   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36220   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36221   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36222   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
36223
/* Overloaded vrshrq: dispatch on the vector argument; immediate p1 forwarded
   to the type-suffixed _n intrinsic.  NOTE: this macro was previously defined
   twice with a token-identical body (legal redefinition, but redundant); the
   duplicate has been removed.  */
36224 #define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36225   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36226   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36227   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36228   int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
36229   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36230   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
36231   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
36241
/* Overloaded vrshlq: vector+scalar (_n, int32_t shift) and vector+vector
   forms; note unsigned vectors take a SIGNED shift vector (s8/s16/s32).  */
36242 #define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36243   __typeof(p1) __p1 = (p1); \
36244   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36245   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
36246   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
36247   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36248   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
36249   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
36250   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36251   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36252   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36253   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36254   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36255   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36256   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36257
/* Overloaded vrmulhq: vector+vector only, for all six 8/16/32-bit signed and
   unsigned element types.  */
36258 #define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36259   __typeof(p1) __p1 = (p1); \
36260   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36261   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36262   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36263   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36264   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36265   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36266   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36267
/* Overloaded vrhaddq: vector+vector only, dispatch across the six integer
   element types.  */
36268 #define __arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36269   __typeof(p1) __p1 = (p1); \
36270   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36271   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36272   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36273   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36274   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36275   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36276   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36277
/* Overloaded vqsubq: vector+scalar (_n, scalar coerced to the matching lane
   type) and vector+vector forms for all six integer element types.  */
36278 #define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36279   __typeof(p1) __p1 = (p1); \
36280   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36281   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36282   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36283   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36284   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
36285   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
36286   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
36287   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36288   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36289   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36290   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36291   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36292   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36293
/* Overloaded vqshluq: signed input vectors only (s8/s16/s32); immediate p1
   forwarded to the _n intrinsic.  */
36294 #define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36295   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36296   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36297   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36298   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));})
36299
/* Overloaded vqshlq: vector+vector form only here; unsigned data vectors take
   a signed shift vector of matching lane width.  */
36300 #define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36301   __typeof(p1) __p1 = (p1); \
36302   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36303   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36304   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36305   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36306   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36307   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36308   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36309
/* Overloaded vqshlq_r: dispatch on the vector argument; p1 (register shift
   count) forwarded untouched.  */
36310 #define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36311   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36312   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36313   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36314   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
36315   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36316   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
36317   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
36318
/* Overloaded vqshlq_n: dispatch on the vector argument; immediate p1
   forwarded untouched so it remains a constant expression.  */
36319 #define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36320   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36321   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
36322   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
36323   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
36324   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
36325   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
36326   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
36327
/* Overloaded vqrshlq: vector+vector (signed shift vector even for unsigned
   data) and vector+scalar (_n, int32_t shift) forms.  */
36328 #define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36329   __typeof(p1) __p1 = (p1); \
36330   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36331   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36332   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36333   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36334   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36335   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36336   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36337   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
36338   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
36339   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36340   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
36341   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
36342   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
36343
/* Overloaded vqrdmulhq: signed types only; vector+vector and vector+scalar
   (_n) forms.  */
36344 #define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36345   __typeof(p1) __p1 = (p1); \
36346   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36347   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36348   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36349   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36350   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36351   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36352   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
36353
/* Overloaded vmlaldavxq: signed s16/s32 vector pairs only.  */
36354 #define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36355   __typeof(p1) __p1 = (p1); \
36356   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36357   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36358   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36359
/* Overloaded vqmovuntq: unsigned narrow destination paired with a signed wide
   source (u8+s16, u16+s32).  */
36360 #define __arm_vqmovuntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36361   __typeof(p1) __p1 = (p1); \
36362   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36363   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36364   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36365
/* Overloaded vqmovntq: narrow destination + wide source of the SAME
   signedness (s8+s16, s16+s32, u8+u16, u16+u32).  */
36366 #define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36367   __typeof(p1) __p1 = (p1); \
36368   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36369   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36370   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36371   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36372   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36373
/* Overloaded vqmovnbq: same dispatch shape as vqmovntq — narrow destination +
   wide source of matching signedness.  */
36374 #define __arm_vqmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36375   __typeof(p1) __p1 = (p1); \
36376   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36377   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36378   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36379   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36380   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36381
/* Overloaded vqdmulltq: signed s16/s32 only; vector+scalar (_n) and
   vector+vector forms.  */
36382 #define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36383   __typeof(p1) __p1 = (p1); \
36384   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36385   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36386   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36387   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36388   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36389
/* Overloaded vqmovunbq: unsigned narrow destination + signed wide source
   (u8+s16, u16+s32), mirroring vqmovuntq.  */
36390 #define __arm_vqmovunbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36391   __typeof(p1) __p1 = (p1); \
36392   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36393   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36394   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36395
/* Overloaded vqdmullbq: signed s16/s32 only; vector+scalar (_n) and
   vector+vector forms, mirroring vqdmulltq.  */
36396 #define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36397   __typeof(p1) __p1 = (p1); \
36398   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36399   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36400   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36401   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36402   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36403
/* Overloaded vqdmulhq: signed types only (s8/s16/s32); vector+scalar (_n) and
   vector+vector forms.  */
36404 #define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36405   __typeof(p1) __p1 = (p1); \
36406   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36407   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36408   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36409   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36410   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36411   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36412   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36413
/* Overloaded vqaddq: vector+scalar (_n, scalar coerced to the lane type) and
   vector+vector forms for all six integer element types.  */
36414 #define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36415   __typeof(p1) __p1 = (p1); \
36416   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36417   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36418   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36419   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36420   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
36421   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
36422   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
36423   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36424   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36425   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36426   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36427   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36428   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36429
/* Overloaded vmulltq_poly: polynomial variants p8/p16, carried in unsigned
   vector types.  */
36430 #define __arm_vmulltq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36431   __typeof(p1) __p1 = (p1); \
36432   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36433   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36434   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
36435
/* Overloaded vmullbq_poly: polynomial p8/p16 variants, mirroring
   vmulltq_poly.  */
36436 #define __arm_vmullbq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36437   __typeof(p1) __p1 = (p1); \
36438   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36439   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36440   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
36441
/* Overloaded vmulltq_int: vector+vector only, across all six integer element
   types.  */
36442 #define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36443   __typeof(p1) __p1 = (p1); \
36444   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36445   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36446   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36447   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36448   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36449   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36450   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36451
/* Polymorphic vhaddq: halving add.  The __ARM_mve_type_int_n cases accept a
   vector + scalar pair (the _n_ forms); the remaining cases take two vectors
   of the same element type.  */
36452 #define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36453 __typeof(p1) __p1 = (p1); \
36454 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36455 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36456 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36457 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36458 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
36459 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
36460 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
36461 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36462 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36463 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36464 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36465 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36466 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36467
/* Polymorphic vhcaddq_rot270: halving complex add with 270-degree rotation;
   signed vector types only.  */
36468 #define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36469 __typeof(p1) __p1 = (p1); \
36470 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36471 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36472 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36473 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36474
/* Polymorphic vhcaddq_rot90: as above, with 90-degree rotation.  */
36475 #define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36476 __typeof(p1) __p1 = (p1); \
36477 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36478 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36479 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36480 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36481
/* Polymorphic vhsubq: halving subtract; vector+scalar (_n_) and
   vector+vector forms, mirroring vhaddq above.  */
36482 #define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36483 __typeof(p1) __p1 = (p1); \
36484 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36485 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
36486 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
36487 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
36488 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
36489 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
36490 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
36491 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36492 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36493 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36494 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36495 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36496 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36497
/* Polymorphic vminq: element-wise minimum over the six vector/vector
   signed and unsigned element widths.  */
36498 #define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36499 __typeof(p1) __p1 = (p1); \
36500 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36501 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36502 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36503 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36504 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36505 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36506 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36507
/* Polymorphic vminaq: first operand is unsigned, second is the signed
   vector of matching width (the _s forms take that mixed pair).  */
36508 #define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36509 __typeof(p1) __p1 = (p1); \
36510 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36511 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36512 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36513 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36514
/* Polymorphic vmaxq: element-wise maximum, same dispatch shape as vminq.  */
36515 #define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36516 __typeof(p1) __p1 = (p1); \
36517 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36518 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36519 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36520 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36521 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36522 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36523 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36524
/* Polymorphic vmaxaq: unsigned first operand, signed second operand,
   same dispatch shape as vminaq.  */
36525 #define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36526 __typeof(p1) __p1 = (p1); \
36527 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36528 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36529 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36530 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
36531
/* Polymorphic vmovntq: narrowing move (top half); first operand is the
   narrow destination vector, second the wide source vector.  */
36532 #define __arm_vmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36533 __typeof(p1) __p1 = (p1); \
36534 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36535 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36536 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36537 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36538 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36539
/* Polymorphic vmovnbq: narrowing move (bottom half), same shape as
   vmovntq.  */
36540 #define __arm_vmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36541 __typeof(p1) __p1 = (p1); \
36542 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36543 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36544 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36545 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36546 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36547
/* Polymorphic vmulhq: multiply returning high half, two same-type
   vectors.  */
36548 #define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36549 __typeof(p1) __p1 = (p1); \
36550 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36551 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36552 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36553 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36554 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36555 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36556 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36557
/* Polymorphic vmullbq_int: integer multiply-long-bottom, companion to
   vmulltq_int above.  */
36558 #define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
36559 __typeof(p1) __p1 = (p1); \
36560 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36561 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
36562 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
36563 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
36564 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
36565 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
36566 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
36567
/* Polymorphic vbicq_m_n: predicated bit-clear with immediate; dispatches on
   the first operand only, p1 (immediate) and p2 (predicate) are passed
   through unchanged.  */
36568 #define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36569 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36570 int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
36571 int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
36572 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
36573 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
36574
/* Polymorphic vqrshrnbq: saturating rounding shift-right-narrow (bottom);
   narrow accumulator + wide source + immediate shift p2.  */
36575 #define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36576 __typeof(p1) __p1 = (p1); \
36577 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36578 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36579 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36580 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
36581 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
36582
/* Polymorphic vqrshrunbq: unsigned-result variant; unsigned narrow
   destination with signed wide source.  */
36583 #define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36584 __typeof(p1) __p1 = (p1); \
36585 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36586 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36587 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
36588
/* Polymorphic vshlcq: shift left with carry; dispatch on the vector
   operand only.  */
36589 #define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36590 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36591 int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
36592 int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
36593 int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
36594 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
36595 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
36596 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
36597
/* Polymorphic vclsq_m: predicated count-leading-sign-bits (signed types
   only); p2 is the predicate.  */
36598 #define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36599 __typeof(p1) __p1 = (p1); \
36600 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36601 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36602 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36603 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
36604
/* Polymorphic vclzq_m: predicated count-leading-zeros over all six
   signed/unsigned element widths.  */
36605 #define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36606 __typeof(p1) __p1 = (p1); \
36607 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36608 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclzq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36609 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclzq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36610 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclzq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36611 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vclzq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
36612 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
36613 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
36614
/* Polymorphic vmaxaq_m: predicated vmaxaq (unsigned accumulator, signed
   operand); p2 is the predicate.  */
36615 #define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36616 __typeof(p1) __p1 = (p1); \
36617 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36618 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36619 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36620 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
36621
/* Polymorphic vminaq_m: predicated vminaq, same dispatch shape.  */
36622 #define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36623 __typeof(p1) __p1 = (p1); \
36624 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36625 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36626 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36627 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
36628
/* Polymorphic vmlaq: multiply-accumulate with scalar third operand
   (always the _n_ forms; p2 must be a scalar).  */
36629 #define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36630 __typeof(p1) __p1 = (p1); \
36631 __typeof(p2) __p2 = (p2); \
36632 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36633 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
36634 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
36635 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
36636 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
36637 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
36638 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
36639
/* Polymorphic vsriq: shift-right-and-insert with immediate shift p2.  */
36640 #define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36641 __typeof(p1) __p1 = (p1); \
36642 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36643 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36644 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36645 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36646 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
36647 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
36648 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
36649
/* Polymorphic vsliq: shift-left-and-insert with immediate shift p2.  */
36650 #define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36651 __typeof(p1) __p1 = (p1); \
36652 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
36653 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
36654 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
36655 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
36656 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
36657 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
36658 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
36659
/* Polymorphic vshlq_m_r: predicated shift by register; dispatch on the
   vector operand only.  */
36660 #define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36661 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36662 int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
36663 int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
36664 int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
36665 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
36666 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
36667 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
36668
/* Polymorphic vrshlq_m_n: predicated rounding shift by register.  p1 is
   captured once into __p1 and passed to every _Generic arm, so a
   side-effecting shift-count expression is evaluated exactly once.
   (Previously only the u32 arm used __p1 while the others re-evaluated
   raw p1.)  */
36669 #define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36670 __typeof(p1) __p1 = (p1); \
36671 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36672 int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __p1, p2), \
36673 int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __p1, p2), \
36674 int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __p1, p2), \
36675 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __p1, p2), \
36676 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __p1, p2), \
36677 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));})
36678
/* Polymorphic vqshlq_m_r: predicated saturating shift by register.  */
36679 #define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36680 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36681 int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
36682 int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
36683 int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
36684 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
36685 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
36686 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
36687
/* Polymorphic vqrshlq_m_n: predicated saturating rounding shift.  */
36688 #define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36689 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
36690 int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
36691 int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
36692 int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
36693 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
36694 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
36695 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
36696
/* Polymorphic vqrdmlsdhxq: three same-type signed vector operands,
   dispatched on all three typeids.  */
36697 #define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36698 __typeof(p1) __p1 = (p1); \
36699 __typeof(p2) __p2 = (p2); \
36700 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36701 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
36702 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
36703 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36704
/* Polymorphic vqrdmlsdhq: same dispatch shape as vqrdmlsdhxq.  */
36705 #define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36706 __typeof(p1) __p1 = (p1); \
36707 __typeof(p2) __p2 = (p2); \
36708 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36709 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
36710 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
36711 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36712
/* Polymorphic vqrdmlashq: two signed vectors plus a scalar third operand
   (_n_ forms only).  */
36713 #define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36714 __typeof(p1) __p1 = (p1); \
36715 __typeof(p2) __p2 = (p2); \
36716 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36717 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
36718 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
36719 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
36720
/* Polymorphic vqdmlashq: same dispatch shape as vqrdmlashq.  */
36721 #define __arm_vqdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36722 __typeof(p1) __p1 = (p1); \
36723 __typeof(p2) __p2 = (p2); \
36724 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36725 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
36726 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
36727 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
36728
36729 #define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
36730 __typeof(p1) __p1 = (p1); \
36731 __typeof(p2) __p2 = (p2); \
36732 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
36733 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
36734 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
36735 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
36736
/* Type-generic dispatch for vmlasq: selects the __arm_vmlasq_n_<suffix>
   intrinsic from the (vector, vector, scalar) operand types; signed and
   unsigned 8/16/32-bit variants.  */
#define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
36747
/* Type-generic dispatch for vqdmlahq: selects the __arm_vqdmlahq_n_<suffix>
   intrinsic from the (vector, vector, scalar) operand types; signed only.  */
#define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
36755
/* Type-generic dispatch for vqrdmladhxq: selects the __arm_vqrdmladhxq_<suffix>
   intrinsic from the three vector operand types; signed only.  */
#define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36763
/* Type-generic dispatch for vqrdmladhq: selects the __arm_vqrdmladhq_<suffix>
   intrinsic from the three vector operand types; signed only.  */
#define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36771
/* Type-generic dispatch for the predicated vqnegq_m: selects the
   __arm_vqnegq_m_<suffix> intrinsic from the two vector operand types;
   p2 is the predicate, passed through unchanged.  */
#define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
36778
/* Type-generic dispatch for vqdmlsdhxq: selects the __arm_vqdmlsdhxq_<suffix>
   intrinsic from the three vector operand types; signed only.  */
#define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36786
/* Type-generic dispatch for vqdmlsdhq: selects the __arm_vqdmlsdhq_<suffix>
   intrinsic from the three vector operand types; signed only.  */
#define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36794
/* Type-generic dispatch for vqdmladhxq: selects the __arm_vqdmladhxq_<suffix>
   intrinsic from the three vector operand types; signed only.  */
#define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36802
/* Type-generic dispatch for vqdmladhq: selects the __arm_vqdmladhq_<suffix>
   intrinsic from the three vector operand types; signed only.  */
#define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
36810
/* Type-generic dispatch for the predicated vmovlbq_m: selects the
   __arm_vmovlbq_m_<suffix> intrinsic from the (wide result, narrow source)
   vector types; p2 is the predicate, passed through unchanged.  */
#define __arm_vmovlbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovlbq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovlbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
36818
/* Type-generic dispatch for the predicated vmovnbq_m: selects the
   __arm_vmovnbq_m_<suffix> intrinsic from the (narrow result, wide source)
   vector types; p2 is the predicate, passed through unchanged.  */
#define __arm_vmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
36826
/* Type-generic dispatch for the predicated vmovntq_m: selects the
   __arm_vmovntq_m_<suffix> intrinsic from the (narrow result, wide source)
   vector types; p2 is the predicate, passed through unchanged.  */
#define __arm_vmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
36834
/* Type-generic dispatch for the predicated vmovltq_m: selects the
   __arm_vmovltq_m_<suffix> intrinsic from the (wide result, narrow source)
   vector types; p2 is the predicate, passed through unchanged.  */
#define __arm_vmovltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovltq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovltq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovltq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
36842
/* Type-generic dispatch for vshrnbq: selects the __arm_vshrnbq_n_<suffix>
   intrinsic from the (narrow result, wide source) vector types; p2 is the
   immediate shift count, passed through unchanged.  */
#define __arm_vshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
36850
/* Type-generic dispatch for the predicated vcvtaq_m: selects the
   __arm_vcvtaq_m_<int>_<float> intrinsic from the (integer inactive,
   float source) vector types; p2 is the predicate.  */
#define __arm_vcvtaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
36858
/* Type-generic dispatch for the predicated vcvtq_m: selects the
   __arm_vcvtq_m_<dst>_<src> intrinsic, covering both float->integer and
   integer->float directions; p2 is the predicate.  */
#define __arm_vcvtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcvtq_m_f16_s16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcvtq_m_f32_s32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
36870
/* Type-generic dispatch for the predicated fixed-point vcvtq_m_n: selects
   the __arm_vcvtq_m_n_<dst>_<src> intrinsic in both conversion directions;
   p2 is the immediate, p3 the predicate, both passed through unchanged.  */
#define __arm_vcvtq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_n_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_n_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_n_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_n_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcvtq_m_n_f16_s16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcvtq_m_n_f32_s32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_n_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_n_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
36882
/* Type-generic dispatch for the predicated vabsq_m: selects the
   __arm_vabsq_m_<suffix> intrinsic from the two vector operand types;
   signed integer and float variants; p2 is the predicate.  */
#define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabsq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabsq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
36891
/* Type-generic dispatch for vcmlaq: selects the f16 or f32 variant from
   the three float vector operand types.  */
#define __arm_vcmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
36898
/* Type-generic dispatch for vcmlaq_rot180: selects the f16 or f32 variant
   from the three float vector operand types.  */
#define __arm_vcmlaq_rot180(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot180_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot180_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
36905
/* Type-generic dispatch for vcmlaq_rot270: selects the f16 or f32 variant
   from the three float vector operand types.  */
#define __arm_vcmlaq_rot270(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
36912
/* Type-generic dispatch for vcmlaq_rot90: selects the f16 or f32 variant
   from the three float vector operand types.  */
#define __arm_vcmlaq_rot90(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
36919
/* Type-generic dispatch for the predicated vrndxq_m: selects the f16 or
   f32 variant from the two float vector operand types; p2 is the predicate.  */
#define __arm_vrndxq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndxq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndxq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
36925
/* Type-generic dispatch for the predicated vrndq_m: selects the f16 or
   f32 variant from the two float vector operand types; p2 is the predicate.  */
#define __arm_vrndq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
36931
/* Type-generic dispatch for the predicated vrndpq_m: selects the f16 or
   f32 variant from the two float vector operand types; p2 is the predicate.  */
#define __arm_vrndpq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndpq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndpq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
36937
/* Type-generic dispatch for the predicated vcmpgtq_m: selects the
   vector/vector or vector/scalar (_n_) variant from the operand types;
   signed integer and float forms; p2 is the predicate.  */
#define __arm_vcmpgtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
36951
/* Type-generic dispatch for the predicated vcmpleq_m: selects the
   vector/vector or vector/scalar (_n_) variant from the operand types;
   signed integer and float forms; p2 is the predicate.  */
#define __arm_vcmpleq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpleq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpleq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2));})
36965
/* Type-generic dispatch for the predicated vcmpltq_m: selects the
   vector/vector or vector/scalar (_n_) variant from the operand types;
   signed integer and float forms; p2 is the predicate.  */
#define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2));})
36979
/* Type-generic dispatch for the predicated vcmpneq_m: selects the
   vector/vector or vector/scalar (_n_) variant from the operand types;
   signed, unsigned and float forms; p2 is the predicate.  */
#define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpneq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpneq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2));})
36999
/* Type-generic dispatch for the predicated vcvtbq_m: selects the f32<->f16
   conversion variant from the (result, source) vector types; p2 is the
   predicate.  */
#define __arm_vcvtbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float16x8_t]: __arm_vcvtbq_m_f32_f16 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float32x4_t]: __arm_vcvtbq_m_f16_f32 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37005
/* Type-generic dispatch for the predicated vcvttq_m: selects the f32<->f16
   conversion variant from the (result, source) vector types; p2 is the
   predicate.  */
#define __arm_vcvttq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float16x8_t]: __arm_vcvttq_m_f32_f16 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float32x4_t]: __arm_vcvttq_m_f16_f32 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37011
/* Type-generic dispatch for the predicated vcvtmq_m: selects the
   __arm_vcvtmq_m_<int>_<float> intrinsic from the (integer inactive,
   float source) vector types; p2 is the predicate.  */
#define __arm_vcvtmq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtmq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtmq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtmq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtmq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37019
/* Type-generic dispatch for the predicated vcvtnq_m: selects the
   __arm_vcvtnq_m_<int>_<float> intrinsic from the (integer inactive,
   float source) vector types; p2 is the predicate.  */
#define __arm_vcvtnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtnq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtnq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtnq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtnq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37027
37028 #define __arm_vcvtpq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
37029 __typeof(p1) __p1 = (p1); \
37030 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
37031 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtpq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37032 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtpq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
37033 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtpq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37034 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtpq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37035
/* Overload resolution for the predicated vdupq_m: dispatch on the inactive
   vector P0 and the scalar P1; the scalar is narrowed by a plain cast to the
   element type of the selected variant.  */
#define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), (int8_t) __p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), (int16_t) __p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), (int32_t) __p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint8_t) __p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint16_t) __p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vdupq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), (float16_t) __p1, p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vdupq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), (float32_t) __p1, p2));})
37047
/* Overload resolution for vfmaq: a floating-point scalar third operand
   selects the _n variant (scalar funnelled through __ARM_mve_coerce2 as
   double), a vector third operand selects the full-vector variant.  */
#define __arm_vfmaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmaq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmaq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})

/* Overload resolution for vfmsq: vector-only third operand (no _n
   variant is provided here).  */
#define __arm_vfmsq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})

/* Overload resolution for vfmasq: scalar-only third operand (_n variants
   only).  */
#define __arm_vfmasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmasq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmasq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double)));})
37070
/* Overload resolution for the predicated vmaxnmaq_m: two float vector
   operands, dispatch on element width.  */
#define __arm_vmaxnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* Overload resolution for the predicated across-vector vmaxnmavq_m.  */
#define __arm_vmaxnmavq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* Overload resolution for the predicated across-vector vmaxnmvq_m.  */
#define __arm_vmaxnmvq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* Overload resolution for vmaxnmavq_p: P0 is a floating-point scalar
   accumulator (funnelled through __ARM_mve_coerce2 as double), dispatch on
   the vector operand P1.  */
#define __arm_vmaxnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_p_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_p_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* Overload resolution for vmaxnmvq_p: same shape as vmaxnmavq_p above.  */
#define __arm_vmaxnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_p_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_p_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37100
/* Overload resolution for the predicated vminnmaq_m: two float vector
   operands, dispatch on element width.  */
#define __arm_vminnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* Overload resolution for vminnmavq_p: scalar accumulator P0 (coerced via
   double) plus float vector P1, mirroring vmaxnmavq_p.  */
#define __arm_vminnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_p_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_p_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* Overload resolution for vminnmvq_p: same shape as vminnmavq_p above.  */
#define __arm_vminnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_p_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_p_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37118
/* Overload resolution for the predicated vrndnq_m: dispatch on the float
   vector element width.  The predicate P2 is passed straight through, as in
   the sibling __arm_vrndaq_m/__arm_vrndmq_m macros; the previous definition
   stashed P2 in a local __p2 but then used it in only one of the two
   branches, which was inconsistent and served no purpose.  */
#define __arm_vrndnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndnq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndnq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37125
/* Overload resolution for the predicated vrndaq_m: dispatch on the float
   vector element width.  */
#define __arm_vrndaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* Overload resolution for the predicated vrndmq_m: same shape as
   vrndaq_m above.  */
#define __arm_vrndmq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37137
/* Overload resolution for the predicated vrev64q_m: covers all integer
   widths plus f16/f32.  */
#define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev64q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev64q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrev64q_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev64q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrev64q_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrev64q_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

/* Overload resolution for the predicated vrev32q_m: only 8/16-bit element
   types (and f16) are meaningful for a 32-bit-group reversal.  */
#define __arm_vrev32q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev32q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev32q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev32q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev32q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrev32q_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2));})
37158
/* Overload resolution for vpselq (predicated select): covers every vector
   type including the 64-bit-lane and float variants.  */
#define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vpselq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vpselq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vpselq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int64x2_t]: __arm_vpselq_s64 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vpselq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vpselq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vpselq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint64x2_t]: __arm_vpselq_u64 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint64x2_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vpselq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vpselq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37172
/* Overload resolution for vcmpgeq: vector/vector variants plus the _n
   vector/scalar variants; integer scalars are coerced to the element type,
   floating-point scalars via __ARM_mve_coerce2 as double.  */
#define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double)));})
37186
/* Overload resolution for vrshrnbq: P0 is the narrow destination vector,
   P1 the wide source; P2 is the immediate shift count, passed through
   unchanged.  */
#define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Overload resolution for the predicated vrev16q_m: only 8-bit element
   types are meaningful for a 16-bit-group reversal.  */
#define __arm_vrev16q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev16q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev16q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2));})
37200
/* Overload resolution for vqshruntq: unsigned narrow destination P0,
   signed wide source P1, immediate shift P2 (signed sources only).  */
#define __arm_vqshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* Overload resolution for vqshrnbq: narrow destination P0, wide source P1,
   immediate shift P2; signed and unsigned variants.  */
#define __arm_vqshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Overload resolution for vqshrntq: same dispatch shape as vqshrnbq
   above.  */
#define __arm_vqshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Overload resolution for vqrshruntq: unsigned narrow destination, signed
   wide source, immediate shift (signed sources only).  */
#define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
37228
/* Overload resolution for the predicated vqmovnbq_m: narrow destination P0,
   wide source P1; signed and unsigned variants.  */
#define __arm_vqmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Overload resolution for the predicated vqmovntq_m: same dispatch shape
   as vqmovnbq_m above.  */
#define __arm_vqmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Overload resolution for the predicated vqmovunbq_m: unsigned narrow
   destination, signed wide source.  */
#define __arm_vqmovunbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* Overload resolution for the predicated vqmovuntq_m: same dispatch shape
   as vqmovunbq_m above.  */
#define __arm_vqmovuntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
37256
/* Overload resolution for vqrshrntq: narrow destination P0, wide source P1,
   immediate shift P2; signed and unsigned variants.  */
#define __arm_vqrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
37264
/* A second, byte-identical definition of __arm_vqrshruntq used to appear
   here; it has been removed.  The single definition earlier in this file
   remains in effect (an identical #define redefinition is legal, but the
   duplicate was redundant).  */
37270
/* Overload resolution for the predicated vnegq_m: signed integer and float
   vector types only (negation of unsigned vectors is not provided).  */
#define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vnegq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vnegq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37279
/* Overload resolution for the predicated vcmpgeq_m: vector/vector and _n
   vector/scalar variants, integer and float; scalar coercion mirrors the
   unpredicated vcmpgeq.  */
#define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce2(__p1, double), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce2(__p1, double), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37293
/* Overload resolution for the predicated vabdq_m: three vector operands
   (inactive, a, b) plus predicate P3; all integer widths and f16/f32.  */
#define __arm_vabdq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37306
/* Predicated vector add.  Dispatches on the operand types to the
   vector/vector or vector/scalar (_n) implementation with the matching
   type suffix; p3 is the merge predicate.  Scalar operands are coerced
   to the element type expected by the selected intrinsic, matching the
   convention used by __arm_vmulq_m and __arm_vsubq_m.  */
#define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
37327
/* Predicated bitwise AND: dispatch on operand vector types to the
   implementation with the matching type suffix; p3 is the predicate.  */
#define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37340
/* Predicated bitwise clear (a & ~b): dispatch on operand vector types
   to the implementation with the matching type suffix.  */
#define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37353
/* Predicated bit-reverse shift right: dispatch on the two vector
   operand types; p2 is the scalar shift operand, p3 the predicate.  */
#define __arm_vbrsrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbrsrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbrsrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbrsrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbrsrq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbrsrq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3));})
37365
/* Predicated complex add with 270-degree rotation: dispatch on the
   operand vector types to the matching type-suffixed implementation.  */
#define __arm_vcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37378
/* Predicated complex add with 90-degree rotation: dispatch on the
   operand vector types to the matching type-suffixed implementation.  */
#define __arm_vcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37391
/* Predicated complex multiply-accumulate (float only): dispatch on the
   operand vector types to the f16 or f32 implementation.  */
#define __arm_vcmlaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37398
/* Predicated complex multiply-accumulate, 180-degree rotation (float
   only): dispatch to the f16 or f32 implementation.  */
#define __arm_vcmlaq_rot180_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot180_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot180_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37405
/* Predicated complex multiply-accumulate, 270-degree rotation (float
   only): dispatch to the f16 or f32 implementation.  */
#define __arm_vcmlaq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37412
/* Predicated complex multiply-accumulate, 90-degree rotation (float
   only): dispatch to the f16 or f32 implementation.  */
#define __arm_vcmlaq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37419
/* Predicated complex multiply (float only): dispatch on the operand
   vector types to the f16 or f32 implementation.  */
#define __arm_vcmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37426
/* Predicated complex multiply, 180-degree rotation (float only):
   dispatch to the f16 or f32 implementation.  */
#define __arm_vcmulq_rot180_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37433
/* Predicated complex multiply, 270-degree rotation (float only):
   dispatch to the f16 or f32 implementation.  */
#define __arm_vcmulq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37440
/* Predicated complex multiply, 90-degree rotation (float only):
   dispatch to the f16 or f32 implementation.  Spacing normalized to
   match the sibling vcmulq dispatch macros.  */
#define __arm_vcmulq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37447
/* Predicated bitwise exclusive OR: dispatch on operand vector types to
   the implementation with the matching type suffix.  */
#define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37460
/* Predicated fused multiply-add (float only): dispatch on the third
   operand being a vector or a scalar (_n variant, coerced via double).  */
#define __arm_vfmaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmaq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmaq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
37469
/* Predicated fused multiply-add with scalar addend (float only):
   scalar-only (_n) dispatch, coerced via double.  */
#define __arm_vfmasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmasq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmasq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
37476
/* Predicated fused multiply-subtract (float only): dispatch to the
   f16 or f32 implementation.  */
#define __arm_vfmsq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37483
/* Predicated floating-point maximum (NaN-propagating maxnm): dispatch
   to the f16 or f32 implementation.  */
#define __arm_vmaxnmq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37490
/* Predicated floating-point minimum (NaN-propagating minnm): dispatch
   to the f16 or f32 implementation.  */
#define __arm_vminnmq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37497
/* Predicated vector multiply.  Dispatches on the operand types to the
   vector/vector or vector/scalar (_n) implementation with the matching
   type suffix; p3 is the merge predicate.  */
#define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
37518
/* Predicated bitwise OR-NOT (a | ~b): dispatch on operand vector types
   to the implementation with the matching type suffix.  */
#define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37531
/* Overload dispatcher for the predicated vsubq_m intrinsic: uses C11
   _Generic on the __ARM_mve_typeid of the first three arguments to pick
   the type-specific __arm_vsubq_m_<t> (vector,vector,vector) or
   __arm_vsubq_m_n_<t> (vector,vector,scalar) variant; p3 (the predicate)
   is passed through unchanged.  fp_n scalars go through __ARM_mve_coerce2
   as double, matching the file's convention for float scalar overloads.  */
37532 #define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37533   __typeof(p1) __p1 = (p1); \
37534   __typeof(p2) __p2 = (p2); \
37535   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37536   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
37537   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37538   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37539   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
37540   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37541   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37542   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37543   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
37544   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
37545   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
37546   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
37547   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
37548   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
37549   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
37550   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
37551   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
37552
/* Overload dispatcher for the predicated vorrq_m intrinsic: _Generic
   selection on the three operand typeids routes to the matching
   __arm_vorrq_m_<t> intrinsic (vector,vector,vector only — no scalar
   variant in this table); p3 is the predicate.  */
37553 #define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
37554   __typeof(p1) __p1 = (p1); \
37555   __typeof(p2) __p2 = (p2); \
37556   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37557   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
37558   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37559   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37560   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
37561   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37562   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37563   int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
37564   int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
37565
/* Overload dispatcher for vld1q: _Generic on the pointer argument's
   typeid selects the per-element-type load; the pointer is coerced to
   the corresponding "<type> const *".  Plain-parenthesized (no statement
   expression) since p0 is used only once.  */
37566 #define __arm_vld1q(p0) (\
37567 _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37568   int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce(p0, int8_t const *)), \
37569   int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce(p0, int16_t const *)), \
37570   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce(p0, int32_t const *)), \
37571   int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce(p0, uint8_t const *)), \
37572   int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce(p0, uint16_t const *)), \
37573   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce(p0, uint32_t const *)), \
37574   int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld1q_f16 (__ARM_mve_coerce(p0, float16_t const *)), \
37575   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld1q_f32 (__ARM_mve_coerce(p0, float32_t const *))))
37576
/* Overload dispatcher for vld1q_z (zeroing-predicated load): same
   pointer-typeid dispatch as vld1q, with the predicate p1 forwarded
   unchanged to each type-specific variant.  */
37577 #define __arm_vld1q_z(p0,p1) ( \
37578 _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37579   int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce(p0, int8_t const *), p1), \
37580   int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce(p0, int16_t const *), p1), \
37581   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_z_s32 (__ARM_mve_coerce(p0, int32_t const *), p1), \
37582   int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_z_u8 (__ARM_mve_coerce(p0, uint8_t const *), p1), \
37583   int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce(p0, uint16_t const *), p1), \
37584   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce(p0, uint32_t const *), p1), \
37585   int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld1q_z_f16 (__ARM_mve_coerce(p0, float16_t const *), p1), \
37586   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld1q_z_f32 (__ARM_mve_coerce(p0, float32_t const *), p1)))
37587
/* Overload dispatcher for vld2q (two-vector structured load): pointer
   typeid selects the per-element-type variant returning the x2 tuple
   type.  */
37588 #define __arm_vld2q(p0) ( \
37589 _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37590   int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce(p0, int8_t const *)), \
37591   int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce(p0, int16_t const *)), \
37592   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld2q_s32 (__ARM_mve_coerce(p0, int32_t const *)), \
37593   int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld2q_u8 (__ARM_mve_coerce(p0, uint8_t const *)), \
37594   int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld2q_u16 (__ARM_mve_coerce(p0, uint16_t const *)), \
37595   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce(p0, uint32_t const *)), \
37596   int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld2q_f16 (__ARM_mve_coerce(p0, float16_t const *)), \
37597   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld2q_f32 (__ARM_mve_coerce(p0, float32_t const *))))
37598
/* Overload dispatcher for vld4q (four-vector structured load): pointer
   typeid selects the per-element-type variant returning the x4 tuple
   type.  */
37599 #define __arm_vld4q(p0) ( \
37600 _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37601   int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce(p0, int8_t const *)), \
37602   int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce(p0, int16_t const *)), \
37603   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld4q_s32 (__ARM_mve_coerce(p0, int32_t const *)), \
37604   int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld4q_u8 (__ARM_mve_coerce(p0, uint8_t const *)), \
37605   int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce(p0, uint16_t const *)), \
37606   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce(p0, uint32_t const *)), \
37607   int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld4q_f16 (__ARM_mve_coerce(p0, float16_t const *)), \
37608   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld4q_f32 (__ARM_mve_coerce(p0, float32_t const *))))
37609
/* Overload dispatcher for vldrhq_gather_offset (half-word gather load
   with byte offsets): dispatches on the base-pointer typeid and the
   offset-vector typeid; the base pointer goes through __ARM_mve_coerce1
   (pointer coercion) rather than __ARM_mve_coerce.  */
37610 #define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
37611   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37612   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
37613   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
37614   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
37615   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
37616   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t)));})
37617
/* Predicated (_z) form of the vldrhq_gather_offset dispatcher: same
   (base pointer, offset vector) selection, with predicate p2 forwarded
   to each variant.  */
37618 #define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37619   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37620   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37621   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37622   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37623   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37624   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
37625
/* Overload dispatcher for vldrhq_gather_shifted_offset: same dispatch
   shape as the unshifted variant, routing to the _shifted_offset
   intrinsics.  */
37626 #define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
37627   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37628   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
37629   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
37630   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
37631   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
37632   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t)));})
37633
/* Predicated (_z) form of the vldrhq_gather_shifted_offset dispatcher;
   predicate p2 forwarded unchanged.  */
37634 #define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37635   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37636   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37637   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37638   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37639   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37640   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_f16 (__ARM_mve_coerce1(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
37641
/* Overload dispatcher for vldrwq_gather_offset (word gather load):
   dispatches on the base-pointer typeid only; the offset vector p1 is
   passed through as-is.  */
37642 #define __arm_vldrwq_gather_offset(p0,p1) ( \
37643 _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37644   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce1(p0, int32_t *), p1), \
37645   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1), \
37646   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_offset_f32 (__ARM_mve_coerce1(p0, float32_t *), p1)))
37647
/* Predicated (_z) form of the vldrwq_gather_offset dispatcher;
   predicate p2 forwarded unchanged.  */
37648 #define __arm_vldrwq_gather_offset_z(p0,p1,p2) ( \
37649 _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37650   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int32_t *), p1, p2), \
37651   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1, p2), \
37652   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_offset_z_f32 (__ARM_mve_coerce1(p0, float32_t *), p1, p2)))
37653
/* Overload dispatcher for vldrwq_gather_shifted_offset: base-pointer
   typeid dispatch, shifted-offset variants.  */
37654 #define __arm_vldrwq_gather_shifted_offset(p0,p1) ( \
37655 _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37656   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce1(p0, int32_t *), p1), \
37657   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1), \
37658   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_f32 (__ARM_mve_coerce1(p0, float32_t *), p1)))
37659
/* Predicated (_z) form of the vldrwq_gather_shifted_offset dispatcher;
   predicate p2 forwarded unchanged.  */
37660 #define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ( \
37661 _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
37662   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(p0, int32_t *), p1, p2), \
37663   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(p0, uint32_t *), p1, p2), \
37664   int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_f32 (__ARM_mve_coerce1(p0, float32_t *), p1, p2)))
37665
/* Overload dispatcher for vst1q_p (predicated contiguous store):
   dispatches on (destination pointer typeid, value vector typeid);
   predicate p2 forwarded unchanged.  */
37666 #define __arm_vst1q_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37667   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37668   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
37669   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37670   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37671   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_p_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
37672   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37673   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37674   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2), \
37675   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_p_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37676
/* Overload dispatcher for vst2q (two-vector structured store):
   dispatches on (destination pointer typeid, x2 tuple typeid).  */
37677 #define __arm_vst2q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
37678   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37679   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
37680   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x2_t]: __arm_vst2q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x2_t)), \
37681   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x2_t]: __arm_vst2q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x2_t)), \
37682   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x2_t]: __arm_vst2q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x2_t)), \
37683   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \
37684   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)), \
37685   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x2_t]: __arm_vst2q_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x2_t)), \
37686   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x2_t]: __arm_vst2q_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x2_t)));})
37687
/* Overload dispatcher for vst1q (contiguous store): dispatches on
   (destination pointer typeid, value vector typeid).  */
37688 #define __arm_vst1q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
37689   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37690   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
37691   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
37692   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
37693   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
37694   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
37695   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
37696   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)), \
37697   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
37698
/* Overload dispatcher for vstrhq (half-word store): note the s32/u32
   cases still coerce the pointer to a 16-bit element pointer — the
   wider vector is stored to half-word-sized destinations.  */
37699 #define __arm_vstrhq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
37700   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37701   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
37702   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
37703   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
37704   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
37705   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)));})
37706
/* Predicated (_p) form of the vstrhq dispatcher; predicate p2 forwarded
   unchanged to each variant.  */
37707 #define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37708   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37709   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
37710   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37711   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
37712   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37713   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2));})
37714
/* Overload dispatcher for the predicated vstrhq_scatter_offset_p:
   dispatches on (base pointer, offset vector, value vector) typeids;
   predicate p3 forwarded unchanged.  */
37715 #define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
37716   __typeof(p2) __p2 = (p2); \
37717   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37718   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37719   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37720   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37721   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37722   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
37723
/* Overload dispatcher for vstrhq_scatter_offset: dispatches on
   (base pointer, offset vector, value vector) typeids.  */
37724 #define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37725   __typeof(p2) __p2 = (p2); \
37726   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37727   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
37728   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
37729   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
37730   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
37731   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
37732
/* Overload dispatcher for the predicated vstrhq_scatter_shifted_offset_p:
   same three-way typeid dispatch as the unshifted scatter store, routed
   to the _shifted_offset_p intrinsics; predicate p3 forwarded.  */
37733 #define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
37734   __typeof(p2) __p2 = (p2); \
37735   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37736   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37737   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37738   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37739   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37740   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
37741
/* Overload dispatcher for vstrhq_scatter_shifted_offset: three-way
   typeid dispatch to the _shifted_offset store intrinsics.  */
37742 #define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37743   __typeof(p2) __p2 = (p2); \
37744   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37745   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
37746   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
37747   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
37748   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
37749   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
37750
/* Overload dispatcher for vstrwq_p (predicated word store): dispatches
   on (destination pointer, value vector) typeids; predicate p2
   forwarded unchanged.  */
37751 #define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37752   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37753   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
37754   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
37755   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_p_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
37756
/* Overload dispatcher for vstrwq (word store): dispatches on
   (destination pointer, value vector) typeids.  */
37757 #define __arm_vstrwq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
37758   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
37759   int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
37760   int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
37761   int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_f32 (__ARM_mve_coerce(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
37762
/* NOTE(review): token-identical redefinition of __arm_vstrhq_scatter_offset
   already defined earlier in this file.  Benign in ISO C (identical macro
   redefinition is permitted, C11 6.10.3p2) but redundant — one copy could
   be removed in a cleanup.  */
37763 #define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37764   __typeof(p2) __p2 = (p2); \
37765   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37766   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
37767   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
37768   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
37769   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
37770   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
37771
/* NOTE(review): token-identical redefinition of
   __arm_vstrhq_scatter_offset_p already defined earlier in this file.
   Legal (identical redefinition) but redundant.  */
37772 #define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
37773   __typeof(p2) __p2 = (p2); \
37774   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37775   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37776   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37777   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37778   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37779   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
37780
/* NOTE(review): token-identical redefinition of
   __arm_vstrhq_scatter_shifted_offset already defined earlier in this
   file.  Legal (identical redefinition) but redundant.  */
37781 #define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
37782   __typeof(p2) __p2 = (p2); \
37783   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37784   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
37785   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
37786   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
37787   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
37788   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
37789
/* NOTE(review): token-identical redefinition of
   __arm_vstrhq_scatter_shifted_offset_p already defined earlier in this
   file.  Legal (identical redefinition) but redundant.  */
37790 #define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
37791   __typeof(p2) __p2 = (p2); \
37792   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
37793   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
37794   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
37795   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
37796   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
37797   int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
37798
/* Polymorphic vstrwq_scatter_base: dispatch on the value vector only; p0 is
   the uint32x4_t base-address vector and p1 the immediate offset.  */
#define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));})
37804
/* Predicated form of vstrwq_scatter_base: predicate p3 forwarded to the
   _p variant selected by the value-vector type.  */
#define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32(p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_p_f32(p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
37810
/* Polymorphic vstrwq_scatter_offset: dispatch on base-pointer and
   value-vector types; the uint32x4_t offset vector p1 passes straight
   through uncoerced.  */
#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
37817
/* Predicated form of vstrwq_scatter_offset: predicate p3 forwarded to the
   _p variant selected by the base-pointer and value-vector types.  */
#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_p_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
37824
/* Removed a redefinition of __arm_vstrwq_scatter_offset_p that was an exact
   token-for-token duplicate of the definition above.  Identical macro
   redefinition is legal C but redundant.  */
37831
/* Removed a redefinition of __arm_vstrwq_scatter_offset that was an exact
   token-for-token duplicate of the definition above.  Identical macro
   redefinition is legal C but redundant.  */
37838
/* Polymorphic vstrwq_scatter_shifted_offset: like vstrwq_scatter_offset but
   offsets are left-shifted (scaled by element size) by the instruction.  */
#define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
37845
/* Predicated form of vstrwq_scatter_shifted_offset: predicate p3 forwarded
   to the _p variant selected by the argument types.  */
#define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
37852
/* Removed a redefinition of __arm_vstrwq_scatter_shifted_offset_p that was an
   exact token-for-token duplicate of the definition above.  Identical macro
   redefinition is legal C but redundant.  */
37859
/* Removed a redefinition of __arm_vstrwq_scatter_shifted_offset that was an
   exact token-for-token duplicate of the definition above.  Identical macro
   redefinition is legal C but redundant.  */
37866
/* Polymorphic vuninitializedq: returns an uninitialized vector of the same
   type as p0.  p0 is evaluated only for its type (via __ARM_mve_typeid);
   its value is discarded.  */
#define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 (), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vuninitializedq_f16 (), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vuninitializedq_f32 ());})
37879
/* Polymorphic vreinterpretq_f16: bit-reinterpret any other MVE vector type
   as float16x8_t, dispatching on the source type.  */
#define __arm_vreinterpretq_f16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_f16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37891
/* Polymorphic vreinterpretq_f32: bit-reinterpret any other MVE vector type
   as float32x4_t, dispatching on the source type.  */
#define __arm_vreinterpretq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
37903
/* Polymorphic vreinterpretq_s16: bit-reinterpret any other MVE vector type
   as int16x8_t, dispatching on the source type.  */
#define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37915
/* Polymorphic vreinterpretq_s32: bit-reinterpret any other MVE vector type
   as int32x4_t, dispatching on the source type.  */
#define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37927
/* Polymorphic vreinterpretq_s64: bit-reinterpret any other MVE vector type
   as int64x2_t, dispatching on the source type.  */
#define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37939
/* Polymorphic vreinterpretq_s8: bit-reinterpret any other MVE vector type
   as int8x16_t, dispatching on the source type.  */
#define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37951
/* Polymorphic vreinterpretq_u16: bit-reinterpret any other MVE vector type
   as uint16x8_t, dispatching on the source type.  */
#define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37963
/* Polymorphic vreinterpretq_u32: bit-reinterpret any other MVE vector type
   as uint32x4_t, dispatching on the source type.  */
#define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37975
/* Polymorphic vreinterpretq_u64: bit-reinterpret any other MVE vector type
   as uint64x2_t, dispatching on the source type.  */
#define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37987
/* Polymorphic vreinterpretq_u8: bit-reinterpret any other MVE vector type
   as uint8x16_t, dispatching on the source type.  */
#define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
37999
/* Polymorphic vstrwq_scatter_base_wb (write-back base): dispatch on the
   value-vector type; p0 is a pointer to the base-address vector, which the
   builtin updates.  */
#define __arm_vstrwq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));})
38005
/* Predicated form of vstrwq_scatter_base_wb: predicate p3 forwarded to the
   _p variant selected by the value-vector type.  */
#define __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_p_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
38011
/* Polymorphic predicated-with-undefined-false-lanes ("_x") vabdq: dispatch
   on both operand vector types; p3 is the predicate.  */
#define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38023
/* Polymorphic "_x" vabsq: dispatch on the operand type (signed integer and
   float vectors only); p2 is the predicate.  */
#define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38031
/* Polymorphic "_x" vaddq: dispatch on both operand types, covering
   vector+vector and vector+scalar (_n) forms for integer and float
   elements; p3 is the predicate.  Scalar float operands are matched as
   __ARM_mve_type_fp_n and coerced via __ARM_mve_coerce2.  */
#define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
38051
/* Polymorphic "_x" vandq (bitwise AND): dispatch on both operand vector
   types; p3 is the predicate.  */
#define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38063
/* Polymorphic "_x" vbicq (bit clear, AND NOT): dispatch on both operand
   vector types; p3 is the predicate.  */
#define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38075
/* Polymorphic "_x" vbrsrq (bit reverse and shift right): dispatch on the
   vector operand type; p2 is the scalar shift, p3 the predicate.  */
#define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2, p3));})
38086
/* Polymorphic "_x" vcaddq_rot270 (complex add, 270-degree rotation):
   dispatch on both operand vector types; p3 is the predicate.  */
#define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38098
/* _Generic dispatch: pick the type-suffixed __arm_vcaddq_rot90_x_* intrinsic
   from the vector types of both operands.  */
#define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38110
/* _Generic dispatch: __arm_vcmulq_rot180_x_* (float16/float32 only).  */
#define __arm_vcmulq_rot180_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38116
/* _Generic dispatch: __arm_vcmulq_rot270_x_* (float16/float32 only).  */
#define __arm_vcmulq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38122
/* _Generic dispatch: __arm_vcmulq_x_* (float16/float32 only).  */
#define __arm_vcmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38128
/* _Generic dispatch: integer-to-float conversion, variant chosen from the
   integer element type of p1 (s16/u16 -> f16, s32/u32 -> f32).  */
#define __arm_vcvtq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_x_f32_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
38135
/* _Generic dispatch: integer-to-float conversion with an immediate (p2),
   variant chosen from the integer element type of p1.  */
#define __arm_vcvtq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_n_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_x_n_f32_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_n_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_n_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
38142
/* _Generic dispatch: pick the type-suffixed __arm_veorq_x_* intrinsic from
   the vector types of both operands.  */
#define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16(__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32(__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38154
/* _Generic dispatch: __arm_vmaxnmq_x_* (float16/float32 only).  */
#define __arm_vmaxnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38160
/* _Generic dispatch: __arm_vminnmq_x_* (float16/float32 only).  */
#define __arm_vminnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38166
/* _Generic dispatch: __arm_vmulq_x_*.  Vector-vector forms map to the
   type-suffixed intrinsic; vector-scalar forms (second operand is a plain
   integer or floating constant) map to the _n_ variants.  */
#define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
38186
/* _Generic dispatch: __arm_vnegq_x_* selected from the vector type of p1
   (signed integer and float variants only).  */
#define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38194
/* _Generic dispatch: pick the type-suffixed __arm_vornq_x_* intrinsic from
   the vector types of both operands.  */
#define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38206
/* _Generic dispatch: pick the type-suffixed __arm_vorrq_x_* intrinsic from
   the vector types of both operands.  */
#define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38218
/* _Generic dispatch: __arm_vrev32q_x_* selected from the vector type of p1
   (8/16-bit element types and float16 only).  */
#define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2));})
38226
/* _Generic dispatch: __arm_vrev64q_x_* selected from the vector type of p1.  */
#define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38237
/* _Generic dispatch: __arm_vrndaq_x_* (float16/float32 only).  */
#define __arm_vrndaq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38242
/* _Generic dispatch: __arm_vrndmq_x_* (float16/float32 only).  */
#define __arm_vrndmq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38247
/* _Generic dispatch: __arm_vrndnq_x_* (float16/float32 only).  */
#define __arm_vrndnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38252
/* _Generic dispatch: __arm_vrndpq_x_* (float16/float32 only).  */
#define __arm_vrndpq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38257
/* _Generic dispatch: __arm_vrndq_x_* (float16/float32 only).  */
#define __arm_vrndq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38262
/* _Generic dispatch: __arm_vrndxq_x_* (float16/float32 only).  */
#define __arm_vrndxq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
38267
/* _Generic dispatch: __arm_vsubq_x_* floating-point forms.  Vector-vector
   maps to _f16/_f32; vector-scalar (plain floating constant) to _n_f16/_n_f32.  */
#define __arm_vsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce2(__p2, double), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce2(__p2, double), p3));})
38275
/* _Generic dispatch: __arm_vcmulq_rot90_x_* (float16/float32 only).  */
#define __arm_vcmulq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
38281
/* _Generic dispatch: extract lane p1 from vector p0 via the type-suffixed
   __arm_vgetq_lane_* intrinsic (covers 64-bit element types too).  */
#define __arm_vgetq_lane(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vgetq_lane_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vgetq_lane_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vgetq_lane_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vgetq_lane_s64 (__ARM_mve_coerce(__p0, int64x2_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vgetq_lane_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vgetq_lane_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vgetq_lane_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vgetq_lane_u64 (__ARM_mve_coerce(__p0, uint64x2_t), p1), \
  int (*)[__ARM_mve_type_float16x8_t]: __arm_vgetq_lane_f16 (__ARM_mve_coerce(__p0, float16x8_t), p1), \
  int (*)[__ARM_mve_type_float32x4_t]: __arm_vgetq_lane_f32 (__ARM_mve_coerce(__p0, float32x4_t), p1));})
38294
/* _Generic dispatch: insert scalar p0 into lane p2 of vector p1 via the
   type-suffixed __arm_vsetq_lane_* intrinsic, selected from the scalar
   kind (int/fp) and the destination vector type.  */
#define __arm_vsetq_lane(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vsetq_lane_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vsetq_lane_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vsetq_lane_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int64x2_t]: __arm_vsetq_lane_s64 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vsetq_lane_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vsetq_lane_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vsetq_lane_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint64x2_t]: __arm_vsetq_lane_u64 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint64x2_t), p2), \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vsetq_lane_f16 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vsetq_lane_f32 (__ARM_mve_coerce2(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
38308
#else /* MVE Integer. */
38310
/* _Generic dispatch: scatter-store with write-back, variant chosen from the
   type of the value vector p2.  */
#define __arm_vstrwq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
38315
/* _Generic dispatch: predicated scatter-store with write-back, variant
   chosen from the type of the value vector p2.  */
#define __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
38320
/* _Generic dispatch: store a 4-vector structure, variant chosen from both
   the pointer type p0 and the x4 structure type p1.  */
#define __arm_vst4q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)));})
38329
/* _Generic dispatch: __arm_vabsq_* selected from the vector type of p0
   (signed integer variants only in the MVE-integer section).  */
#define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38335
/* _Generic dispatch: __arm_vclsq_* selected from the (signed) vector type of p0.  */
#define __arm_vclsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38341
/* _Generic dispatch: __arm_vclzq_* selected from the vector type of p0.  */
#define __arm_vclzq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
38350
/* _Generic dispatch: __arm_vnegq_* selected from the (signed) vector type of p0.  */
#define __arm_vnegq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38356
/* _Generic dispatch: __arm_vmovlbq_* selected from the narrow vector type of p0
   (8- and 16-bit element types only).  */
#define __arm_vmovlbq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
38363
/* _Generic dispatch: __arm_vmovltq_* selected from the narrow vector type of p0
   (8- and 16-bit element types only).  */
#define __arm_vmovltq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
38370
/* _Generic dispatch: __arm_vmvnq_* selected from the vector type of p0.  */
#define __arm_vmvnq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
38379
/* _Generic dispatch: __arm_vrev16q_* (8-bit element types only).  */
#define __arm_vrev16q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)));})
38384
/* _Generic dispatch: __arm_vrev32q_* (8/16-bit element types only).  */
#define __arm_vrev32q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
38391
/* _Generic dispatch: __arm_vrev64q_* selected from the vector type of p0.  */
#define __arm_vrev64q(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
38400
/* _Generic dispatch: __arm_vqabsq_* selected from the (signed) vector type of p0.  */
#define __arm_vqabsq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38406
/* _Generic dispatch: __arm_vqnegq_* selected from the (signed) vector type of p0.  */
#define __arm_vqnegq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vqnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vqnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vqnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
38412
/* _Generic dispatch: right shift by immediate p1, mapped to the _n_ variant
   matching the vector type of p0.  */
#define __arm_vshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38421
/* _Generic dispatch: compare-not-equal, variant chosen from the vector types
   of both operands.  */
#define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38431
/* _Generic dispatch: vector shift; the shift-count vector (second operand)
   is always a signed vector, for both signed and unsigned data vectors.  */
#define __arm_vshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38441
/* Type-generic vsubq: handles both vector-vector subtraction and, when the
   second operand is a plain integer (__ARM_mve_type_int_n), the vector-scalar
   _n variants.  */
38442 #define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38443   __typeof(p1) __p1 = (p1); \
38444   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38445   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38446   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38447   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38448   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38449   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38450   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
38451   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38452   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38453   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38454   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38455   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38456   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
38457
/* Type-generic vshlq_r: shift each element by a scalar register value p1,
   which is passed through unmodified (only p0's type is dispatched on).  */
38458 #define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38459   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38460   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38461   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38462   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
38463   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38464   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
38465   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38466
/* Type-generic vrshlq (rounding shift left): a scalar p1 selects the _n
   variants (coerced to int32_t); a vector p1 selects the per-element variants
   with a signed shift-count vector.  */
38467 #define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38468   __typeof(p1) __p1 = (p1); \
38469   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38470   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
38471   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
38472   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38473   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
38474   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
38475   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38476   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38477   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38478   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38479   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38480   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38481   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38482
/* Type-generic vrmulhq (rounding multiply, return high half): vector-vector
   only, same element type on both sides.  */
38483 #define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38484   __typeof(p1) __p1 = (p1); \
38485   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38486   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38487   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38488   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38489   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38490   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38491   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38492
/* Type-generic vrhaddq (rounding halving add): vector-vector only, same
   element type on both sides.  */
38493 #define __arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38494   __typeof(p1) __p1 = (p1); \
38495   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38496   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38497   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38498   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38499   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38500   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38501   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38502
/* Type-generic vqsubq (saturating subtract): a scalar p1 selects the _n
   variants (coerced to the matching element type); a vector p1 selects the
   vector-vector variants.  */
38503 #define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38504   __typeof(p1) __p1 = (p1); \
38505   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38506   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38507   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38508   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38509   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38510   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38511   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
38512   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38513   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38514   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38515   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38516   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38517   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38518
/* Type-generic vqshlq (saturating shift left by vector): the shift-count
   vector is always signed, even for unsigned data.  */
38519 #define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38520   __typeof(p1) __p1 = (p1); \
38521   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38522   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38523   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38524   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38525   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38526   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38527   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38528
/* Type-generic vqshlq_r: saturating shift left by a scalar register value p1,
   passed through unmodified.  */
38529 #define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38530   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38531   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38532   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38533   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
38534   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38535   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
38536   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38537
/* Type-generic vqshluq: saturating shift left of a signed vector by immediate
   p1, producing an unsigned result — signed inputs only.  */
38538 #define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38539   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38540   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38541   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38542   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));})
38543
/* Type-generic vrshrq: rounding shift right by immediate p1, passed through
   unmodified.  */
38544 #define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38545   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38546   int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38547   int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38548   int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
38549   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38550   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
38551   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38552
/* Type-generic vshlq_n: shift left by immediate p1, passed through
   unmodified.  */
38553 #define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38554   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38555   int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38556   int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38557   int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
38558   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38559   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
38560   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38561
/* Type-generic vqshlq_n: saturating shift left by immediate p1, passed
   through unmodified.  */
38562 #define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38563   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38564   int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38565   int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38566   int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
38567   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38568   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
38569   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38570
/* Type-generic vqrshlq (saturating rounding shift left): a vector p1 selects
   per-element shifts (signed count vector); a scalar p1 selects the _n
   variants with the count coerced to int32_t.  */
38571 #define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38572   __typeof(p1) __p1 = (p1); \
38573   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38574   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38575   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38576   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38577   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38578   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38579   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38580   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
38581   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
38582   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38583   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
38584   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
38585   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
38586
/* Type-generic vqrdmulhq (saturating rounding doubling multiply high):
   signed types only; scalar p1 selects the _n variants.  */
38587 #define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38588   __typeof(p1) __p1 = (p1); \
38589   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38590   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38591   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38592   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38593   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38594   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38595   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
38596
/* Type-generic vqdmulhq (saturating doubling multiply high): signed types
   only; scalar p1 selects the _n variants.  */
38597 #define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38598   __typeof(p1) __p1 = (p1); \
38599   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38600   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38601   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38602   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38603   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38604   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38605   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38606
/* Type-generic vqaddq (saturating add): scalar p1 selects the _n variants
   (coerced to the matching element type); vector p1 selects vector-vector.  */
38607 #define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38608   __typeof(p1) __p1 = (p1); \
38609   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38610   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38611   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38612   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38613   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38614   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38615   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
38616   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38617   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38618   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38619   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38620   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38621   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38622
/* Type-generic vorrq (bitwise OR): vector-vector only, same element type on
   both sides.  */
38623 #define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38624   __typeof(p1) __p1 = (p1); \
38625   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38626   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38627   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38628   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38629   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38630   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38631   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38632
/* Type-generic vornq (bitwise OR with complement of p1): vector-vector only,
   same element type on both sides.  */
38633 #define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38634   __typeof(p1) __p1 = (p1); \
38635   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38636   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38637   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38638   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38639   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38640   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38641   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38642
/* Type-generic vmulq: scalar p1 selects the _n variants (coerced to the
   matching element type); vector p1 selects vector-vector multiply.  */
38643 #define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38644   __typeof(p1) __p1 = (p1); \
38645   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38646   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38647   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38648   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38649   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38650   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38651   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
38652   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38653   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38654   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38655   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38656   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38657   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38658
/* Type-generic vmulltq_int: widening multiply of the top (odd-indexed)
   elements; vector-vector only.  */
38659 #define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38660   __typeof(p1) __p1 = (p1); \
38661   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38662   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38663   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38664   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38665   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38666   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38667   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38668
/* Type-generic vmullbq_int: widening multiply of the bottom (even-indexed)
   elements; vector-vector only.  */
38669 #define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38670   __typeof(p1) __p1 = (p1); \
38671   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38672   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38673   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38674   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38675   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38676   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38677   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38678
/* Type-generic vmulhq (multiply, return high half): vector-vector only.  */
38679 #define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38680   __typeof(p1) __p1 = (p1); \
38681   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38682   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38683   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38684   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38685   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38686   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38687   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38688
/* Type-generic vminq (element-wise minimum): vector-vector only.  */
38689 #define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38690   __typeof(p1) __p1 = (p1); \
38691   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38692   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38693   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38694   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38695   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38696   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38697   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38698
/* Type-generic vminaq: p0 is an unsigned accumulator vector, p1 a signed
   vector of the same element width — signed inputs only.  */
38699 #define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38700   __typeof(p1) __p1 = (p1); \
38701   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38702   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38703   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38704   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38705
/* Type-generic vmaxq (element-wise maximum): vector-vector only.  */
38706 #define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38707   __typeof(p1) __p1 = (p1); \
38708   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38709   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38710   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38711   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38712   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38713   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38714   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38715
/* Type-generic vmaxaq: p0 is an unsigned accumulator vector, p1 a signed
   vector of the same element width — signed inputs only.  */
38716 #define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38717   __typeof(p1) __p1 = (p1); \
38718   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38719   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38720   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38721   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38722
/* __arm_vhsubq: polymorphic halving subtract.  Dispatches on both operand
   type ids: vector-scalar (_n, second operand an integer scalar coerced to
   the element type) and vector-vector forms, signed and unsigned 8/16/32.  */
38723 #define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38724 __typeof(p1) __p1 = (p1); \
38725 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38726 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38727 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38728 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38729 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38730 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38731 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
38732 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38733 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38734 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38735 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38736 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38737 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38738
/* __arm_vhcaddq_rot90: polymorphic halving complex add with 90-degree
   rotation.  Signed 8/16/32-bit vector operands only.  */
38739 #define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38740 __typeof(p1) __p1 = (p1); \
38741 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38742 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38743 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38744 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38745
/* __arm_vhcaddq_rot270: polymorphic halving complex add with 270-degree
   rotation.  Signed 8/16/32-bit vector operands only.  */
38746 #define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38747 __typeof(p1) __p1 = (p1); \
38748 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38749 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38750 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38751 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38752
/* __arm_vhaddq: polymorphic halving add.  Mirrors __arm_vhsubq: vector-scalar
   (_n) and vector-vector forms, signed and unsigned 8/16/32-bit.  */
38753 #define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38754 __typeof(p1) __p1 = (p1); \
38755 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38756 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38757 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38758 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38759 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38760 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38761 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)), \
38762 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38763 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38764 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38765 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38766 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38767 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38768
/* __arm_veorq: polymorphic bitwise exclusive-OR.  Vector-vector only,
   signed and unsigned 8/16/32-bit variants.  */
38769 #define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38770 __typeof(p1) __p1 = (p1); \
38771 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38772 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38773 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38774 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38775 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38776 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38777 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38778
/* __arm_vcaddq_rot90: polymorphic complex add with 90-degree rotation.
   Vector-vector forms, signed and unsigned 8/16/32-bit.  */
38779 #define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38780 __typeof(p1) __p1 = (p1); \
38781 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38782 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38783 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38784 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38785 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38786 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38787 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38788
/* __arm_vcaddq_rot270: polymorphic complex add with 270-degree rotation.
   Vector-vector forms, signed and unsigned 8/16/32-bit.  */
38789 #define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38790 __typeof(p1) __p1 = (p1); \
38791 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38792 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38793 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38794 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38795 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38796 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38797 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38798
/* __arm_vbrsrq: polymorphic bit-reverse shift right.  Dispatches on the
   vector operand only; p1 is passed through unexpanded (it is the scalar
   shift argument of the _n intrinsics, not type-dispatched).  */
38799 #define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38800 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38801 int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38802 int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38803 int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
38804 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38805 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
38806 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
38807
/* __arm_vbicq: polymorphic bit clear (AND NOT).  Immediate (_n) forms exist
   only for 16/32-bit elements and use __ARM_mve_coerce1 to keep the scalar
   as plain int; vector-vector forms cover signed/unsigned 8/16/32-bit.  */
38808 #define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38809 __typeof(p1) __p1 = (p1); \
38810 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38811 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1 (__p1, int)), \
38812 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1 (__p1, int)), \
38813 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1 (__p1, int)), \
38814 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1 (__p1, int)), \
38815 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38816 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38817 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38818 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38819 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38820 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38821
/* __arm_vaddq: polymorphic vector add.  Vector-vector forms first, then
   vector-scalar (_n) forms; unlike vhaddq, the _n scalar here is coerced
   to plain int rather than to the element type.  */
38822 #define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38823 __typeof(p1) __p1 = (p1); \
38824 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38825 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38826 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38827 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38828 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38829 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38830 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
38831 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int)), \
38832 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int)), \
38833 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int)), \
38834 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int)), \
38835 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int)), \
38836 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int)));})
38837
/* __arm_vandq: polymorphic bitwise AND.  Vector-vector only, signed and
   unsigned 8/16/32-bit variants.  */
38838 #define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38839 __typeof(p1) __p1 = (p1); \
38840 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38841 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38842 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38843 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38844 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38845 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38846 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38847
/* __arm_vabdq: polymorphic absolute difference.  Vector-vector only,
   signed and unsigned 8/16/32-bit variants.  */
38848 #define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38849 __typeof(p1) __p1 = (p1); \
38850 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38851 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38852 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38853 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38854 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38855 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38856 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38857
/* __arm_vcmpeqq: polymorphic compare-equal.  Vector-vector and
   vector-scalar (_n) forms for signed and unsigned 8/16/32-bit elements;
   the selected intrinsic produces a predicate result.  */
38858 #define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38859 __typeof(p1) __p1 = (p1); \
38860 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38861 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38862 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38863 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38864 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38865 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38866 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
38867 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38868 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38869 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38870 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38871 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38872 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
38873
/* __arm_vcmpneq: polymorphic compare-not-equal.  Same dispatch shape as
   __arm_vcmpeqq: vector-vector and vector-scalar (_n) forms for signed
   and unsigned 8/16/32-bit elements.  */
38874 #define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38875 __typeof(p1) __p1 = (p1); \
38876 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38877 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38878 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38879 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38880 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38881 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38882 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
38883 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38884 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38885 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38886 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
38887 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
38888 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
38889
38890
/* __arm_vqmovntq: polymorphic saturating narrowing move (top half).  First
   operand is the narrow destination vector, second the wide source; 16->8
   and 32->16, signed and unsigned.  */
38891 #define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38892 __typeof(p1) __p1 = (p1); \
38893 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38894 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38895 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38896 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38897 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38898
/* __arm_vqmovnbq: polymorphic saturating narrowing move (bottom half).
   Same dispatch shape as __arm_vqmovntq: 16->8 and 32->16, signed and
   unsigned.  */
38899 #define __arm_vqmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38900 __typeof(p1) __p1 = (p1); \
38901 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38902 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38903 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38904 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38905 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38906
/* __arm_vmulltq_poly: polymorphic polynomial multiply long (top half).
   Operands are unsigned vectors carrying p8/p16 polynomial data.  */
38907 #define __arm_vmulltq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38908 __typeof(p1) __p1 = (p1); \
38909 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38910 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38911 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
38912
/* __arm_vmullbq_poly: polymorphic polynomial multiply long (bottom half).
   Same dispatch shape as __arm_vmulltq_poly (p8/p16 data in unsigned
   vectors).  */
38913 #define __arm_vmullbq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38914 __typeof(p1) __p1 = (p1); \
38915 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38916 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
38917 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
38918
/* __arm_vmovntq: polymorphic narrowing move (top half, non-saturating).
   First operand is the narrow destination vector, second the wide source;
   16->8 and 32->16, signed and unsigned.  */
38919 #define __arm_vmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38920 __typeof(p1) __p1 = (p1); \
38921 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38922 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38923 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38924 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38925 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38926
/* __arm_vmovnbq: polymorphic narrowing move (bottom half, non-saturating).
   Same dispatch shape as __arm_vmovntq: 16->8 and 32->16, signed and
   unsigned.  */
38927 #define __arm_vmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38928 __typeof(p1) __p1 = (p1); \
38929 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38930 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38931 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38932 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
38933 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
38934
/* __arm_vmlaldavxq: polymorphic multiply-accumulate long across vector,
   exchanged.  Signed 16/32-bit vector operands only.  */
38935 #define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38936 __typeof(p1) __p1 = (p1); \
38937 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38938 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38939 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38940
/* __arm_vqmovuntq: polymorphic saturating narrowing move, signed source to
   unsigned destination (top half).  First operand is the unsigned narrow
   vector, second the signed wide source (s16/s32).  */
38941 #define __arm_vqmovuntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38942 __typeof(p1) __p1 = (p1); \
38943 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38944 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38945 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38946
/* __arm_vshlltq: polymorphic shift left long (top half).  Dispatches on the
   vector operand only; p1 (the immediate shift amount) is passed through
   unexpanded to the _n intrinsic.  8- and 16-bit sources, signed/unsigned.  */
38947 #define __arm_vshlltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38948 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38949 int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38950 int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38951 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38952 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
38953
/* __arm_vshllbq: polymorphic shift left long (bottom half).  Same dispatch
   shape as __arm_vshlltq: vector-only dispatch, immediate p1 unexpanded,
   8- and 16-bit sources, signed/unsigned.  */
38954 #define __arm_vshllbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38955 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
38956 int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
38957 int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
38958 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
38959 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
38960
/* __arm_vqmovunbq: polymorphic saturating narrowing move, signed source to
   unsigned destination (bottom half).  Same dispatch shape as
   __arm_vqmovuntq (s16/s32 sources).  */
38961 #define __arm_vqmovunbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38962 __typeof(p1) __p1 = (p1); \
38963 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38964 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38965 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38966
/* __arm_vqdmulltq: polymorphic saturating doubling multiply long (top
   half).  Vector-scalar (_n) and vector-vector forms for signed 16/32-bit
   elements.  */
38967 #define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38968 __typeof(p1) __p1 = (p1); \
38969 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38970 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38971 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38972 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38973 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38974
/* __arm_vqdmullbq: polymorphic saturating doubling multiply long (bottom
   half).  Same dispatch shape as __arm_vqdmulltq: _n and vector forms for
   signed 16/32-bit elements.  */
38975 #define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38976 __typeof(p1) __p1 = (p1); \
38977 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38978 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38979 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
38980 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38981 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
38982
/* __arm_vcmpgeq: polymorphic compare-greater-or-equal.  Signed-only:
   vector-vector and vector-scalar (_n) forms for 8/16/32-bit elements
   (unsigned ordered compares use the _cs variants, defined elsewhere).  */
38983 #define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38984 __typeof(p1) __p1 = (p1); \
38985 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38986 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38987 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38988 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38989 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
38990 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
38991 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
38992
/* __arm_vcmpgtq: polymorphic compare-greater-than.  Signed-only:
   vector-vector and vector-scalar (_n) forms for 8/16/32-bit elements.  */
38993 #define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
38994 __typeof(p1) __p1 = (p1); \
38995 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
38996 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
38997 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
38998 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
38999 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
39000 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
39001 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
39002
/* __arm_vcmpleq: polymorphic compare-less-or-equal.  Signed-only:
   vector-vector and vector-scalar (_n) forms for 8/16/32-bit elements.  */
39003 #define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
39004 __typeof(p1) __p1 = (p1); \
39005 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39006 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
39007 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
39008 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
39009 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
39010 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
39011 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
39012
/* Type-generic vcmpltq (compare less-than): dispatches to the s8/s16/s32
   vector/vector overload, or to the _n (vector/scalar) overload when the
   second argument is a plain integer.  */
39013 #define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
39014 __typeof(p1) __p1 = (p1); \
39015 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39016 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
39017 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
39018 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
39019 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
39020 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
39021 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
39022
/* Type-generic predicated vcmpneq_m (compare not-equal, merging): dispatches
   on both operand types to the signed/unsigned vector/vector or _n
   (vector/scalar) overload; p2 is passed through as the predicate mask.  */
39023 #define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39024 __typeof(p1) __p1 = (p1); \
39025 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39026 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39027 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
39028 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
39029 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
39030 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
39031 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
39032 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
39033 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39034 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39035 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39036 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39037 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39038
/* Type-generic vcmpneq (compare not-equal): dispatches on both operand types
   to the signed/unsigned vector/vector overload, or to the _n
   (vector/scalar) overload when the second argument is a plain integer.  */
39039 #define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
39040 __typeof(p1) __p1 = (p1); \
39041 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39042 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
39043 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
39044 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
39045 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
39046 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
39047 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
39048 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
39049 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
39050 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
39051 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
39052 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
39053 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
39054
/* Type-generic vshlcq (shift left with carry): dispatches on the vector type
   of p0; p1 and p2 are forwarded unchanged to the selected overload.  */
39055 #define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39056 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39057 int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
39058 int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
39059 int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
39060 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
39061 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
39062 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39063
/* Type-generic predicated vcmpeqq_m (compare equal, merging): dispatches on
   both operand types to the signed/unsigned vector/vector or _n
   (vector/scalar) overload; p2 is passed through as the predicate mask.  */
39064 #define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39065 __typeof(p1) __p1 = (p1); \
39066 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39067 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39068 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39069 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39070 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39071 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39072 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
39073 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
39074 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
39075 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
39076 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
39077 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
39078 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));})
39079
/* Type-generic predicated vbicq_m_n (bit clear with immediate): dispatches on
   the vector type of p0 (16- and 32-bit elements only); p1 is the immediate
   and p2 the predicate mask, both forwarded unchanged.  */
39080 #define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39081 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39082 int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
39083 int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
39084 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
39085 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39086
/* Type-generic vqrshrnbq (saturating rounding shift right and narrow,
   bottom): dispatches on the narrow-accumulator/wide-source type pair; p2 is
   the shift immediate.  */
39087 #define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39088 __typeof(p1) __p1 = (p1); \
39089 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39090 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39091 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39092 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39093 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39094
/* Type-generic vqrshrunbq (saturating rounding shift right, unsigned narrow,
   bottom): unsigned destination with signed wide source; p2 is the shift
   immediate.  */
39095 #define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39096 __typeof(p1) __p1 = (p1); \
39097 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39098 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39099 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39100
/* Type-generic vqrdmlsdhq: dispatches on the three (matching) signed vector
   operand types to the s8/s16/s32 overload.  */
39101 #define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39102 __typeof(p1) __p1 = (p1); \
39103 __typeof(p2) __p2 = (p2); \
39104 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39105 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
39106 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39107 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39108
/* Type-generic vqrdmlsdhxq (exchanged variant of vqrdmlsdhq): dispatches on
   the three (matching) signed vector operand types.  */
39109 #define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39110 __typeof(p1) __p1 = (p1); \
39111 __typeof(p2) __p2 = (p2); \
39112 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39113 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
39114 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39115 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39116
/* Type-generic predicated vqrshlq_m_n: dispatches on the vector type of p0;
   p1 (shift count) and p2 (predicate) are forwarded unchanged.  */
39117 #define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39118 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39119 int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
39120 int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
39121 int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
39122 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
39123 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
39124 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39125
/* Type-generic predicated vqshlq_m_r: dispatches on the vector type of p0;
   p1 (shift count) and p2 (predicate) are forwarded unchanged.  */
39126 #define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39127 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39128 int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
39129 int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
39130 int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
39131 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
39132 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
39133 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39134
/* Type-generic predicated vrev64q_m (reverse elements in 64-bit halves,
   merging): dispatches on the inactive/source vector type pair; p2 is the
   predicate mask.  */
39135 #define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39136 __typeof(p1) __p1 = (p1); \
39137 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39138 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev64q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39139 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev64q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39140 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrev64q_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39141 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev64q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39142 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39143 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39144
/* Type-generic predicated vrshlq_m_n: dispatches on the vector type of p0;
   p2 (predicate) is forwarded unchanged.  The shift count p1 is captured
   once into __p1 so the macro argument is evaluated exactly once, and __p1
   is used consistently in every selection (previously only the u32 case
   used the captured copy while the others re-evaluated p1).  */
39145 #define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39146 __typeof(p1) __p1 = (p1); \
39147 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39148 int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __p1, p2), \
39149 int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __p1, p2), \
39150 int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __p1, p2), \
39151 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __p1, p2), \
39152 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __p1, p2), \
39153 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));})
39154
/* Type-generic predicated vshlq_m_r: dispatches on the vector type of p0;
   p1 (shift count) and p2 (predicate) are forwarded unchanged.  */
39155 #define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39156 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39157 int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
39158 int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
39159 int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
39160 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
39161 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
39162 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
39163
/* Type-generic vsliq (shift left and insert with immediate): dispatches on
   the destination/source vector type pair to the _n overload; p2 is the
   shift immediate.  */
39164 #define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39165 __typeof(p1) __p1 = (p1); \
39166 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39167 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39168 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39169 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39170 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39171 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39172 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39173
/* Type-generic vsriq (shift right and insert with immediate): dispatches on
   the destination/source vector type pair to the _n overload; p2 is the
   shift immediate.  */
39174 #define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39175 __typeof(p1) __p1 = (p1); \
39176 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39177 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39178 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39179 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39180 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39181 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39182 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39183
/* Type-generic vqrdmlashq: two signed vector operands plus a scalar (int_n)
   multiplier; dispatches on the triple to the _n_s8/s16/s32 overload.  */
39184 #define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39185 __typeof(p1) __p1 = (p1); \
39186 __typeof(p2) __p2 = (p2); \
39187 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39188 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
39189 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
39190 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
39191
/* Type-generic vqdmlashq: two signed vector operands plus a scalar (int_n)
   multiplier; dispatches on the triple to the _n_s8/s16/s32 overload.  */
39192 #define __arm_vqdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39193 __typeof(p1) __p1 = (p1); \
39194 __typeof(p2) __p2 = (p2); \
39195 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39196 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
39197 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
39198 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
39199
/* Type-generic vqrdmlahq: two signed vector operands plus a scalar (int_n)
   multiplier; dispatches on the triple to the _n_s8/s16/s32 overload.  */
39200 #define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39201 __typeof(p1) __p1 = (p1); \
39202 __typeof(p2) __p2 = (p2); \
39203 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39204 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
39205 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
39206 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
39207
/* Type-generic vqrdmladhxq (exchanged variant of vqrdmladhq): dispatches on
   the three (matching) signed vector operand types.  */
39208 #define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39209 __typeof(p1) __p1 = (p1); \
39210 __typeof(p2) __p2 = (p2); \
39211 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39212 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
39213 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39214 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39215
/* Type-generic vqrdmladhq: dispatches on the three (matching) signed vector
   operand types to the s8/s16/s32 overload.  */
39216 #define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39217 __typeof(p1) __p1 = (p1); \
39218 __typeof(p2) __p2 = (p2); \
39219 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39220 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
39221 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39222 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39223
/* Type-generic predicated vqnegq_m (saturating negate, merging): signed
   vector types only; p2 is the predicate mask.  */
39224 #define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39225 __typeof(p1) __p1 = (p1); \
39226 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39227 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39228 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39229 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39230
/* Type-generic vqdmlsdhxq (exchanged variant of vqdmlsdhq): dispatches on
   the three (matching) signed vector operand types.  */
39231 #define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39232 __typeof(p1) __p1 = (p1); \
39233 __typeof(p2) __p2 = (p2); \
39234 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39235 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
39236 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39237 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39238
/* Type-generic predicated vabsq_m (absolute value, merging): signed vector
   types only; p2 is the predicate mask.  */
39239 #define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39240 __typeof(p1) __p1 = (p1); \
39241 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39242 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39243 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39244 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39245
/* Type-generic predicated vclsq_m (count leading sign bits, merging): signed
   vector types only; p2 is the predicate mask.  */
39246 #define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39247 __typeof(p1) __p1 = (p1); \
39248 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39249 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39250 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39251 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39252
/* Type-generic predicated vclzq_m (count leading zeros, merging): dispatches
   on the signed/unsigned inactive/source vector type pair; p2 is the
   predicate mask.  */
39253 #define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39254 __typeof(p1) __p1 = (p1); \
39255 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39256 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclzq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39257 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclzq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39258 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclzq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39259 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vclzq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39260 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39261 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39262
/* Type-generic predicated vcmpgeq_m (compare greater-than-or-equal,
   merging): signed vector/vector or _n (vector/scalar) overloads; p2 is the
   predicate mask.  */
39263 #define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39264 __typeof(p1) __p1 = (p1); \
39265 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39266 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39267 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39268 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39269 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
39270 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
39271 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2));})
39272
/* Type-generic predicated vdupq_m (duplicate scalar, merging): dispatches on
   the inactive vector type of p0; the scalar p1 is cast to the matching
   element type; p2 is the predicate mask.  */
39273 #define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39274 __typeof(p1) __p1 = (p1); \
39275 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39276 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), (int8_t) __p1, p2), \
39277 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), (int16_t) __p1, p2), \
39278 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), (int32_t) __p1, p2), \
39279 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint8_t) __p1, p2), \
39280 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint16_t) __p1, p2), \
39281 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2));})
39282
/* Type-generic predicated vmaxaq_m (max of absolute values, merging):
   unsigned accumulator with signed source vector; p2 is the predicate
   mask.  */
39283 #define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39284 __typeof(p1) __p1 = (p1); \
39285 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39286 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39287 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39288 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39289
/* Type-generic vmlaq (multiply-accumulate with scalar): two matching vector
   operands plus a scalar (int_n) multiplier; dispatches on the triple to the
   signed/unsigned _n overload.  */
39290 #define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39291 __typeof(p1) __p1 = (p1); \
39292 __typeof(p2) __p2 = (p2); \
39293 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39294 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
39295 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
39296 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
39297 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
39298 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
39299 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
39300
/* Polymorphic vmlasq: same dispatch scheme as __arm_vmlaq, selecting the
   matching __arm_vmlasq_n_<type> intrinsic from the argument types.  */
#define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t)));})
39311
/* Polymorphic vnegq_m (merging negate): signed vector types only; p2
   (presumably the predicate) is passed through unchanged.  */
#define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39318
/* Polymorphic vpselq (predicated select): covers all 8/16/32/64-bit signed
   and unsigned vector types; p2 (presumably the predicate) is passed
   through unchanged.  */
#define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vpselq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vpselq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vpselq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int64x2_t]: __arm_vpselq_s64 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vpselq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vpselq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vpselq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint64x2_t]: __arm_vpselq_u64 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint64x2_t), p2));})
39330
/* Polymorphic vqdmlahq: signed (vector, vector, scalar) forms only,
   dispatching to __arm_vqdmlahq_n_<s8|s16|s32>.  */
#define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t)));})
39338
/* Polymorphic vqdmlsdhq: signed three-vector forms only, dispatching to
   __arm_vqdmlsdhq_<s8|s16|s32>.  */
#define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39346
/* Polymorphic vqdmladhxq: signed three-vector forms only, dispatching to
   __arm_vqdmladhxq_<s8|s16|s32>.  */
#define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39354
/* Polymorphic vqdmladhq: signed three-vector forms only, dispatching to
   __arm_vqdmladhq_<s8|s16|s32>.  */
#define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39362
/* Polymorphic vminaq_m: note the mixed signedness — p0 is an unsigned
   vector, p1 a signed vector of the same width; p2 (presumably the
   predicate) is passed through unchanged.  */
#define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39369
/* Polymorphic vrmlaldavhaq: dispatches on the two 32-bit vector operand
   types to __arm_vrmlaldavhaq_<s32|u32>; the scalar accumulator p0 is
   passed through unchanged, the same scheme __arm_vabavq_p uses for its
   accumulator.  (The original body here wrongly expanded to the
   __arm_vcmpltq_m_* comparison intrinsics, so every use of the generic
   vrmlaldavhaq performed a compare instead of the multiply-accumulate.)  */
#define __arm_vrmlaldavhaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
39379
/* Polymorphic vmlsdavxq_p: signed vector forms only, dispatching to
   __arm_vmlsdavxq_p_<s8|s16|s32>; p2 (presumably the predicate) is passed
   through unchanged.  (The original body here wrongly expanded to the
   __arm_vcmpleq_m_* comparison intrinsics.)  */
#define __arm_vmlsdavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39389
/* Polymorphic vmlsdavq_p: signed vector forms only, dispatching to
   __arm_vmlsdavq_p_<s8|s16|s32>; p2 (presumably the predicate) is passed
   through unchanged.  (The original body here wrongly expanded to the
   __arm_vcmpgtq_m_* comparison intrinsics.)  */
#define __arm_vmlsdavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39399
/* Polymorphic vmlsdavaxq: dispatches on the two signed vector operand
   types to __arm_vmlsdavaxq_<s8|s16|s32>; the scalar accumulator p0 is
   passed through unchanged, the same scheme __arm_vabavq_p uses.  (The
   original body here wrongly expanded to the __arm_vshrntq_n_* shift
   intrinsics.)  */
#define __arm_vmlsdavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaxq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39407
/* Polymorphic vmlsdavaq: dispatches on the two signed vector operand
   types to __arm_vmlsdavaq_<s8|s16|s32>; the scalar accumulator p0 is
   passed through unchanged, the same scheme __arm_vabavq_p uses.  (The
   original body here wrongly expanded to the __arm_vrshrntq_n_* shift
   intrinsics.)  */
#define __arm_vmlsdavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
39415
/* Polymorphic vmovlbq_m (merging widen, bottom): p0 is the wide inactive
   vector, p1 the narrow source; p2 (presumably the predicate) is passed
   through unchanged.  */
#define __arm_vmovlbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovlbq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovlbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
39423
/* Polymorphic vmovnbq_m (merging narrow, bottom): p0 is the narrow
   destination vector, p1 the wide source; p2 (presumably the predicate)
   is passed through unchanged.  */
#define __arm_vmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39431
/* Polymorphic vmovntq_m (merging narrow, top): same dispatch as
   __arm_vmovnbq_m but selecting the _t (top) intrinsics.  */
#define __arm_vmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39439
/* Polymorphic vshrnbq (shift-right narrow, bottom): dispatches on the
   (narrow dest, wide source) vector types; p2 (presumably the shift
   immediate) is passed through unchanged.  */
#define __arm_vshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39447
/* Polymorphic vrshrnbq (rounding shift-right narrow, bottom): same
   dispatch as __arm_vshrnbq but selecting the rounding intrinsics.  */
#define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39455
/* Polymorphic vrev32q_m: 8- and 16-bit element types only (32-bit reversal
   regions); p2 (presumably the predicate) is passed through unchanged.  */
#define __arm_vrev32q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev32q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev32q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev32q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev32q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
39463
/* Polymorphic vqshruntq: signed-to-unsigned narrowing forms only
   (unsigned dest, signed wide source); p2 passed through unchanged.  */
#define __arm_vqshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39469
/* Polymorphic vrev16q_m: 8-bit element types only (16-bit reversal
   regions); p2 (presumably the predicate) is passed through unchanged.  */
#define __arm_vrev16q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev16q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev16q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2));})
39475
/* Polymorphic vqshrntq (saturating shift-right narrow, top): dispatches
   on the (narrow dest, wide source) vector types; p2 passed through.  */
#define __arm_vqshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39483
/* Polymorphic vqrshruntq: signed-to-unsigned narrowing forms only
   (unsigned dest, signed wide source); p2 passed through unchanged.  */
#define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39489
/* Polymorphic vqrshrntq: same dispatch as __arm_vqshrntq but selecting the
   rounding intrinsics.  */
#define __arm_vqrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39497
/* Polymorphic vqshrnbq (saturating shift-right narrow, bottom): dispatches
   on the (narrow dest, wide source) vector types; p2 passed through.  */
#define __arm_vqshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39505
/* Polymorphic vqmovuntq_m: signed-to-unsigned narrowing forms only; p2
   (presumably the predicate) is passed through unchanged.  */
#define __arm_vqmovuntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39511
/* Polymorphic vqmovntq_m (merging saturating narrow, top): dispatches on
   the (narrow dest, wide source) vector types; p2 passed through.  */
#define __arm_vqmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39519
/* Polymorphic vqmovnbq_m (merging saturating narrow, bottom): same
   dispatch as __arm_vqmovntq_m but selecting the _b (bottom) intrinsics.  */
#define __arm_vqmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39527
/* Polymorphic vmovltq_m (merging widen, top): p0 is the wide inactive
   vector, p1 the narrow source; p2 (presumably the predicate) is passed
   through unchanged.  */
#define __arm_vmovltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovltq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovltq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovltq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
39535
/* Polymorphic vqmovunbq_m: signed-to-unsigned narrowing forms only; p2
   (presumably the predicate) is passed through unchanged.  */
#define __arm_vqmovunbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
39541
/* Polymorphic vsubq_m: dispatches both the vector+scalar (_n) and
   vector+vector forms for all 8/16/32-bit signed and unsigned element
   types; p3 (presumably the predicate) is passed through unchanged.  */
#define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39558
/* Polymorphic vabavq_p: dispatches only on the two vector operand types;
   the scalar accumulator __p0 and p3 (presumably the predicate) are passed
   through unchanged.  */
#define __arm_vabavq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_p_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_p_u8(__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_p_u16(__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_p_u32(__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39569
/* Polymorphic vabdq_m: three-vector merging forms for all 8/16/32-bit
   signed and unsigned element types; p3 (presumably the predicate) is
   passed through unchanged.  */
#define __arm_vabdq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39580
/* Predicated (merging) bitwise AND.  Selects the [su]{8,16,32} overload
   from the MVE types of p0 (inactive), p1 and p2; p3 is the predicate.  */
39581 #define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39582   __typeof(p1) __p1 = (p1); \
39583   __typeof(p2) __p2 = (p2); \
39584   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39585   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39586   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39587   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39588   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39589   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39590   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39591
/* Predicated (merging) bit clear (p1 & ~p2).  Selects the [su]{8,16,32}
   overload from the MVE types of p0 (inactive), p1 and p2; p3 is the
   predicate.  */
39592 #define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39593   __typeof(p1) __p1 = (p1); \
39594   __typeof(p2) __p2 = (p2); \
39595   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39596   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39597   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39598   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39599   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39600   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39601   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39602
/* Predicated (merging) bit-reverse shift right (vbrsrq_m_n).  p2 is a plain
   scalar bit count, so only p0 (inactive) and p1 (vector input) take part in
   the _Generic selection; __p2 is forwarded uncoerced and p3 is the
   predicate.  */
39603 #define __arm_vbrsrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39604   __typeof(p1) __p1 = (p1); \
39605   __typeof(p2) __p2 = (p2); \
39606   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39607   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbrsrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __p2, p3), \
39608   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbrsrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __p2, p3), \
39609   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbrsrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __p2, p3), \
39610   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __p2, p3), \
39611   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __p2, p3), \
39612   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __p2, p3));})
39613
/* Predicated (merging) complex add with 270-degree rotation.  Selects the
   [su]{8,16,32} overload from the MVE types of p0 (inactive), p1 and p2;
   p3 is the predicate.  */
39614 #define __arm_vcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39615   __typeof(p1) __p1 = (p1); \
39616   __typeof(p2) __p2 = (p2); \
39617   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39618   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39619   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39620   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39621   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39622   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39623   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39624
/* Predicated (merging) complex add with 90-degree rotation.  Selects the
   [su]{8,16,32} overload from the MVE types of p0 (inactive), p1 and p2;
   p3 is the predicate.  */
39625 #define __arm_vcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39626   __typeof(p1) __p1 = (p1); \
39627   __typeof(p2) __p2 = (p2); \
39628   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39629   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39630   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39631   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39632   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39633   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39634   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39635
/* Predicated (merging) bitwise exclusive OR.  Selects the [su]{8,16,32}
   overload from the MVE types of p0 (inactive), p1 and p2; p3 is the
   predicate.  */
39636 #define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39637   __typeof(p1) __p1 = (p1); \
39638   __typeof(p2) __p2 = (p2); \
39639   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39640   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39641   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39642   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39643   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39644   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39645   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39646
/* Predicated multiply-accumulate across vector with scalar accumulator.
   p0 is the scalar accumulator (matched as __ARM_mve_type_int_n and coerced
   to int32_t/uint32_t to match the chosen overload); p1/p2 are the vector
   multiplicands whose element type selects the [su]{8,16,32} overload; p3
   is the predicate.  */
39647 #define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39648   __typeof(p1) __p1 = (p1); \
39649   __typeof(p2) __p2 = (p2); \
39650   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39651   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39652   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39653   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39654   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39655   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39656   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39657
/* Predicated (merging) OR with complement (p1 | ~p2).  Selects the
   [su]{8,16,32} overload from the MVE types of p0 (inactive), p1 and p2;
   p3 is the predicate.  */
39658 #define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39659   __typeof(p1) __p1 = (p1); \
39660   __typeof(p2) __p2 = (p2); \
39661   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39662   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39663   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39664   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39665   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39666   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39667   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39668
/* Predicated (merging) bitwise inclusive OR.  Selects the [su]{8,16,32}
   overload from the MVE types of p0 (inactive), p1 and p2; p3 is the
   predicate.  */
39669 #define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39670   __typeof(p1) __p1 = (p1); \
39671   __typeof(p2) __p2 = (p2); \
39672   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39673   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39674   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39675   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39676   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39677   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39678   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39679
/* Predicated (merging) addition.  Two shapes are dispatched: vector+scalar
   (p2 matched as __ARM_mve_type_int_n, mapping to the _n overloads) and
   vector+vector.  p0 is the inactive value, p3 the predicate.
   NOTE(review): the scalar operand here is coerced to plain `int', whereas
   __arm_vmulq_m below coerces its scalar to the element type (int8_t etc.)
   -- confirm against ACLE/upstream which form is intended; the mismatch
   affects which scalar argument types _Generic accepts.  */
39680 #define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39681   __typeof(p1) __p1 = (p1); \
39682   __typeof(p2) __p2 = (p2); \
39683   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39684   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int), p3), \
39685   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int), p3), \
39686   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int), p3), \
39687   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int), p3), \
39688   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int), p3), \
39689   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int), p3), \
39690   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39691   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39692   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39693   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39694   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39695   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39696
/* Predicated (merging) multiplication.  Two shapes are dispatched:
   vector+scalar (p2 matched as __ARM_mve_type_int_n, mapping to the _n
   overloads) and vector+vector.  p0 is the inactive value, p3 the
   predicate.
   NOTE(review): the scalar operand here is coerced to the element type
   (int8_t/uint16_t/...), whereas __arm_vaddq_m above coerces its scalar
   to plain `int' -- confirm against ACLE/upstream which form is intended;
   the mismatch affects which scalar argument types _Generic accepts.  */
39697 #define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39698   __typeof(p1) __p1 = (p1); \
39699   __typeof(p2) __p2 = (p2); \
39700   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39701   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
39702   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
39703   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
39704   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
39705   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
39706   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
39707   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39708   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39709   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39710   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39711   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39712   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39713
/* Store vector to bytes (narrowing for 16/32-bit elements).  p0 is the
   destination byte pointer (left unevaluated in a temp since only its type
   matters for dispatch); p1 is the vector whose element type/signedness
   selects the overload.  */
39714 #define __arm_vstrbq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
39715   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39716   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
39717   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_s16 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
39718   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_s32 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
39719   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
39720   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_u16 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39721   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_u32 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
39722
/* Scatter-store of bytes at unsigned per-lane offsets.  p0 is the base byte
   pointer, p1 the unsigned offset vector (its width must match the data),
   p2 the data vector whose type selects the [su]{8,16,32} overload.  */
39723 #define __arm_vstrbq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39724   __typeof(p1) __p1 = (p1); \
39725   __typeof(p2) __p2 = (p2); \
39726   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39727   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
39728   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
39729   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
39730   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
39731   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
39732   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
39733
/* Scatter-store of 32-bit words relative to a vector of base addresses.
   p0 (base vector) and p1 (immediate offset) are passed through unchanged;
   only the data vector p2 (signed vs unsigned) selects the overload.  */
39734 #define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
39735   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
39736   int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
39737   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32(p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
39738
/* Gather-load of bytes at unsigned per-lane offsets (widening for 16/32-bit
   results).  The pointee signedness of p0 and the width of the offset
   vector p1 together select the [su]{8,16,32} overload.  */
39739 #define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
39740   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39741   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8 (__ARM_mve_coerce(p0, int8_t const *), __ARM_mve_coerce(__p1, uint8x16_t)), \
39742   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16 (__ARM_mve_coerce(p0, int8_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39743   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32 (__ARM_mve_coerce(p0, int8_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
39744   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint8x16_t)), \
39745   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39746   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint32x4_t)));})
39747
/* Predicated store of vector to bytes.  Like __arm_vstrbq, but only lanes
   enabled by the predicate p2 are written.  */
39748 #define __arm_vstrbq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39749   __typeof(p1) __p1 = (p1); \
39750   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
39751   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
39752   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_p_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
39753   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_p_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
39754   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
39755   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_p_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39756   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_p_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39757
/* Predicated scatter-store of bytes at unsigned per-lane offsets.  Like
   __arm_vstrbq_scatter_offset, but only lanes enabled by the predicate p3
   are written.  */
39758 #define __arm_vstrbq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
39759   __typeof(p1) __p1 = (p1); \
39760   __typeof(p2) __p2 = (p2); \
39761   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
39762   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_p_s8 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
39763   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
39764   int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
39765   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_p_u8 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
39766   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
39767   int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39768
/* Predicated scatter-store of 32-bit words relative to a vector of base
   addresses.  p0 (base vector) and p1 (immediate offset) pass through; the
   data vector p2 selects signed vs unsigned; p3 is the predicate.  */
39769 #define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
39770   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
39771   int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
39772   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39773
/* Contiguous vector load.  The pointee type of p0 (signedness and width)
   alone selects the [su]{8,16,32} overload; note this variant uses a plain
   _Generic expression rather than a statement expression.  */
39774 #define __arm_vld1q(p0) (_Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
39775   int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce(p0, int8_t const *)), \
39776   int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce(p0, int16_t const *)), \
39777   int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce(p0, int32_t const *)), \
39778   int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce(p0, uint8_t const *)), \
39779   int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce(p0, uint16_t const *)), \
39780   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce(p0, uint32_t const *))))
39781
/* Gather-load of halfwords at unsigned per-lane offsets.  The pointee type
   of p0 and the width of the offset vector p1 select the overload; p0 is
   coerced with __ARM_mve_coerce1, which handles pointer conversions.  */
39782 #define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
39783   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39784   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39785   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
39786   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39787   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
39788
/* Zeroing-predicated gather-load of halfwords at unsigned per-lane offsets.
   Like __arm_vldrhq_gather_offset with a trailing predicate p2; disabled
   lanes read as zero.  */
39789 #define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
39790   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39791   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39792   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
39793   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39794   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39795
/* Gather-load of halfwords at per-lane offsets scaled by the element size.
   Same dispatch scheme as __arm_vldrhq_gather_offset.  */
39796 #define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
39797   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39798   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39799   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
39800   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
39801   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
39802
/* Zeroing-predicated gather-load of halfwords at scaled per-lane offsets.
   Like __arm_vldrhq_gather_shifted_offset with a trailing predicate p2;
   disabled lanes read as zero.  */
39803 #define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
39804   _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
39805   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39806   int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
39807   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
39808   int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39809
39810 #define __arm_vldrwq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
39811 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39812 int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1), \
39813 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1));})
39814
39815 #define __arm_vldrwq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39816 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39817 int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1, p2), \
39818 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1, p2));})
39819
39820 #define __arm_vldrwq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
39821 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39822 int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1), \
39823 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1));})
39824
39825 #define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
39826 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
39827 int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce1(__p0, int32_t *), p1, p2), \
39828 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce1(__p0, uint32_t *), p1, p2));})
39829
/* Polymorphic vst1q contiguous store: dispatch on destination pointer type
   and source vector type to the matching type-suffixed intrinsic.  */
#define __arm_vst1q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* Predicated vst1q; p2 is the predicate.  */
#define __arm_vst1q_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_p_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Polymorphic vst2q interleaving store; the source is a x2 vector tuple.  */
#define __arm_vst2q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x2_t]: __arm_vst2q_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x2_t)), \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x2_t]: __arm_vst2q_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x2_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x2_t]: __arm_vst2q_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x2_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)));})
39856
/* Polymorphic vstrhq halfword store: a 32-bit element vector is narrowed to
   halfwords (the s32/u32 cases), otherwise stored as-is.  */
#define __arm_vstrhq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* Predicated variant of vstrhq; p2 is the predicate.  */
#define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
39870
/* Predicated halfword scatter store with offsets; dispatch on base pointer,
   offset vector and value vector types.  p3 is the predicate.
   NOTE(review): these four scatter macros are repeated, token-identically,
   further down in this file (identical redefinition is permitted by C).  */
#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Unpredicated halfword scatter store with offsets.  */
#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})

/* Predicated halfword scatter store with shifted (element-scaled) offsets.  */
#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Unpredicated halfword scatter store with shifted offsets.  */
#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
39902
39903
/* Polymorphic vstrwq word store: dispatch on pointer and vector types.  */
#define __arm_vstrwq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* Predicated variant of vstrwq; p2 is the predicate.  */
#define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Predicated doubleword scatter store to a vector of base addresses; only
   the value vector type is dispatched on.  */
#define __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})

/* Unpredicated doubleword scatter store to a vector of base addresses.  */
#define __arm_vstrdq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
39923
/* Predicated doubleword scatter store with offsets; dispatch on the base
   pointer and value vector types.  p3 is the predicate.  */
#define __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})

/* Unpredicated doubleword scatter store with offsets.  */
#define __arm_vstrdq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t)));})

/* Predicated doubleword scatter store with shifted (element-scaled) offsets.  */
#define __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})

/* Unpredicated doubleword scatter store with shifted offsets.  */
#define __arm_vstrdq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
39951
/* NOTE(review): the following four vstrhq scatter macros are token-identical
   redefinitions of the versions given earlier in this file.  Identical
   redefinition is valid C (no diagnostic), but the duplication is redundant
   and could be removed in a cleanup.  */
#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})

/* Duplicate definition — see the earlier, identical one.  */
#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Duplicate definition — see the earlier, identical one.  */
#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})

/* Duplicate definition — see the earlier, identical one.  */
#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39983
/* Unpredicated word scatter store with offsets; dispatch on base pointer and
   value vector types.  NOTE(review): redefined token-identically later in
   this file (identical redefinition is permitted by C).  */
#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
39989
/* Predicated word scatter store with offsets; dispatch on base pointer and
   value vector types.  p3 is the predicate.
   Fix: the u32 branch previously dropped the predicate argument p3 when
   calling __arm_vstrwq_scatter_offset_p_u32, which both lost the predicate
   and made this definition differ from the second definition of this macro
   later in this file (a non-identical macro redefinition, which C requires
   a diagnostic for).  The two definitions are now token-identical.  */
#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
39995
/* NOTE(review): second definition of __arm_vstrwq_scatter_offset_p.  This one
   correctly forwards the predicate p3 on both branches; the earlier
   definition should match it exactly.  */
#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Duplicate (token-identical) definition of __arm_vstrwq_scatter_offset;
   see the earlier one.  */
#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
40007
/* Unpredicated word scatter store with shifted (element-scaled) offsets.  */
#define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));})

/* Predicated word scatter store with shifted offsets; p3 is the predicate.  */
#define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40019
/* Polymorphic vuninitializedq: returns an uninitialized vector of the same
   type as the argument; the argument's value is only used for type dispatch.  */
#define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 ());})
40030
/* Polymorphic vreinterpretq_<dst> family: bit-cast the argument vector to the
   destination element type named in the macro, dispatching on the source
   vector type.  Each macro covers the seven source types other than the
   destination type itself.  */
#define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})

/* Reinterpret to int32x4_t.  */
#define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})

/* Reinterpret to int64x2_t.  */
#define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})

/* Reinterpret to int8x16_t.  */
#define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})

/* Reinterpret to uint16x8_t.  */
#define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})

/* Reinterpret to uint32x4_t.  */
#define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})

/* Reinterpret to uint64x2_t.  */
#define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)));})

/* Reinterpret to uint8x16_t.  */
#define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
40110
/* Predicated-with-undefined-inactive (_x) absolute value; signed element
   types only.  p2 is the predicate.  */
#define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
40116
/* Polymorphic predicated vaddq: _Generic over the pair of operand type ids.
   Vector+vector pairs pick __arm_vaddq_x_<t>; vector+scalar pairs (second
   operand typed __ARM_mve_type_int_n) pick the _n variant with the scalar
   coerced to the vector's element type.  p3 is the predicate.  */
#define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
40132
/* Polymorphic predicated vcaddq_rot270: vector+vector dispatch on the two
   operand type ids; both operands must have the same vector type.  p3 is the
   predicate.  */
#define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Polymorphic predicated vcaddq_rot90: same dispatch shape as the rot270
   table above.  */
#define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40152
/* Polymorphic predicated veorq (exclusive-or): vector+vector dispatch on the
   two operand type ids; p3 is the predicate.  */
#define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16(__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32(__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40162
/* Polymorphic predicated vmaxq: vector+vector dispatch on the two operand
   type ids to the type-specific __arm_vmaxq_x_* intrinsic; p3 is the
   predicate.  Bug fix: the previous table dispatched every case to the
   __arm_vmulq_x_* (multiply) intrinsics — a copy-paste from __arm_vmulq_x —
   and also listed vector+scalar (_n) cases referencing nonexistent
   __arm_vmaxq-style _n variants.  vmaxq_x is vector+vector only, matching
   the sibling __arm_vminq_x table.  */
#define __arm_vmaxq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40178
/* Polymorphic predicated vminq: vector+vector dispatch on the two operand
   type ids; p3 is the predicate.  */
#define __arm_vminq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40188
/* Polymorphic predicated vmovlbq (widening move, bottom half): only the
   narrow 8-/16-bit element types are accepted; p2 is the predicate.  */
#define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})

/* Polymorphic predicated vmovltq (widening move, top half): same dispatch
   shape as __arm_vmovlbq_x above.  */
#define __arm_vmovltq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
40202
/* Polymorphic predicated vmulhq (high half of multiply): vector+vector
   dispatch on the two operand type ids; p3 is the predicate.  */
#define __arm_vmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40212
/* Polymorphic predicated vmullbq (widening multiply of bottom halves),
   integer form: vector+vector dispatch on the two operand type ids; p3 is
   the predicate.  */
#define __arm_vmullbq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Polymorphic predicated vmullbq, polynomial form: only the p8/p16
   polynomial variants (carried in unsigned vector types) are provided.  */
#define __arm_vmullbq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
40228
/* Polymorphic predicated vmulltq (widening multiply of top halves), integer
   form: same dispatch shape as __arm_vmullbq_int_x.  */
#define __arm_vmulltq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Polymorphic predicated vmulltq, polynomial form: p8/p16 variants only,
   matching __arm_vmullbq_poly_x.  */
#define __arm_vmulltq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
40244
/* Polymorphic predicated vmulq: vector+vector cases pick __arm_vmulq_x_<t>;
   vector+scalar cases (__ARM_mve_type_int_n) pick the _n variant with the
   scalar coerced to the element type.  p3 is the predicate.  */
#define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
40260
/* Polymorphic predicated vnegq: signed element types only; p2 is the
   predicate.  */
#define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
40266
/* Polymorphic predicated vornq (OR-NOT): vector+vector dispatch on the two
   operand type ids; p3 is the predicate.  */
#define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Polymorphic predicated vorrq (OR): same dispatch shape as __arm_vornq_x
   above.  */
#define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40286
/* Polymorphic predicated vrev32q: only 8-/16-bit element types (the ones
   that fit more than one element in a 32-bit container); p2 is the
   predicate.  */
#define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})

/* Polymorphic predicated vrev64q: all six 8-/16-/32-bit integer vector
   types; p2 is the predicate.  */
#define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
40302
/* Polymorphic predicated vabdq (absolute difference): vector+vector dispatch
   on the two operand type ids; p3 is the predicate.  */
#define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Polymorphic predicated vandq (AND): same dispatch shape as __arm_vabdq_x
   above.  */
#define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Polymorphic predicated vbicq (AND-NOT / bit clear): same dispatch shape
   as __arm_vandq_x above.  */
#define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40332
/* Polymorphic predicated vbrsrq: dispatch on the vector operand's type id;
   p2 (a scalar) and p3 (the predicate) are forwarded unchanged to the _n
   variant.  */
#define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40341
/* Polymorphic predicated ("_z") vld1q: dispatch on the pointer operand's
   type id; p0 is coerced to the matching const element pointer and p1 (the
   predicate) is forwarded.  Plain _Generic expression — no statement
   expression needed since p0 is used only once per branch.  */
#define __arm_vld1q_z(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce(p0, int8_t const *), p1), \
  int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce(p0, int16_t const *), p1), \
  int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_z_s32 (__ARM_mve_coerce(p0, int32_t const *), p1), \
  int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_z_u8 (__ARM_mve_coerce(p0, uint8_t const *), p1), \
  int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce(p0, uint16_t const *), p1), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce(p0, uint32_t const *), p1)))

/* Polymorphic vld2q (two-vector structure load): dispatch on the pointer
   operand's type id.  */
#define __arm_vld2q(p0) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce(p0, int8_t const *)), \
  int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce(p0, int16_t const *)), \
  int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld2q_s32 (__ARM_mve_coerce(p0, int32_t const *)), \
  int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld2q_u8 (__ARM_mve_coerce(p0, uint8_t const *)), \
  int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld2q_u16 (__ARM_mve_coerce(p0, uint16_t const *)), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce(p0, uint32_t const *))))


/* Polymorphic vld4q (four-vector structure load): same dispatch shape as
   __arm_vld2q.  */
#define __arm_vld4q(p0) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce(p0, int8_t const *)), \
  int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce(p0, int16_t const *)), \
  int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld4q_s32 (__ARM_mve_coerce(p0, int32_t const *)), \
  int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld4q_u8 (__ARM_mve_coerce(p0, uint8_t const *)), \
  int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce(p0, uint16_t const *)), \
  int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce(p0, uint32_t const *))))
40366
/* Polymorphic vgetq_lane: dispatch on the vector operand's type id over all
   eight integer vector types (including 64-bit); p1 (the lane index) is
   forwarded unchanged.  */
#define __arm_vgetq_lane(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vgetq_lane_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vgetq_lane_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vgetq_lane_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vgetq_lane_s64 (__ARM_mve_coerce(__p0, int64x2_t), p1), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vgetq_lane_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vgetq_lane_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vgetq_lane_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vgetq_lane_u64 (__ARM_mve_coerce(__p0, uint64x2_t), p1));})

/* Polymorphic vsetq_lane: the first operand is the scalar value
   (__ARM_mve_type_int_n) and the second selects the vector type; the scalar
   is coerced to the vector's element type.  p2 (the lane index) is forwarded
   unchanged.  */
#define __arm_vsetq_lane(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vsetq_lane_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vsetq_lane_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vsetq_lane_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int64x2_t]: __arm_vsetq_lane_s64 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vsetq_lane_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vsetq_lane_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vsetq_lane_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint64x2_t]: __arm_vsetq_lane_u64 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint64x2_t), p2));})
40389
40390 #endif /* MVE Integer. */
40391
/* Polymorphic vshrntq (narrowing shift right, top half): p0 is the narrow
   destination vector, p1 the wide source vector; the _Generic pair selects
   the matching narrow/wide variant.  p2 (the shift amount) is forwarded
   unchanged.  */
#define __arm_vshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})


/* Polymorphic vrshrntq (rounding narrowing shift right, top half): same
   dispatch shape as __arm_vshrntq above.  */
#define __arm_vrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40408
40409
/* Polymorphic predicated vmvnq (bitwise NOT): dispatch on the operand's
   type id over all six 8-/16-/32-bit integer vector types; p2 is the
   predicate.  */
#define __arm_vmvnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})

/* Polymorphic predicated vrev16q: only 8-bit element types (the ones that
   fit more than one element in a 16-bit container); p2 is the predicate.  */
#define __arm_vrev16q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2));})
40423
/* Polymorphic vrhaddq_x (predicated rounding halving add): dispatches on
   the (p1, p2) vector-type pair; both operands must share the same
   element type.  Predicate p3 is passed through unchanged.  */
40424 #define __arm_vrhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40425 __typeof(p2) __p2 = (p2); \
40426 _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40427 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40428 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40429 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40430 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40431 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40432 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40433
/* Polymorphic vshlq_x (predicated vector shift by vector): note the
   shift-amount vector p2 is always a SIGNED vector, even for the
   unsigned-value variants (uintNxM shifted by intNxM).  */
40434 #define __arm_vshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40435 __typeof(p2) __p2 = (p2); \
40436 _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40437 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40438 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40439 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40440 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40441 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40442 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40443
/* Polymorphic vrmulhq_x (predicated rounding multiply-return-high-half):
   dispatches on the matching (p1, p2) vector-type pair; predicate p3 is
   passed through unchanged.  */
40444 #define __arm_vrmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40445 __typeof(p2) __p2 = (p2); \
40446 _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40447 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40448 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40449 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40450 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40451 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40452 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40453
/* Polymorphic vrshlq_x (predicated rounding shift by vector): like
   __arm_vshlq_x above, the shift-amount vector p2 is always signed.  */
40454 #define __arm_vrshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40455 __typeof(p2) __p2 = (p2); \
40456 _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40457 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40458 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40459 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40460 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40461 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40462 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40463
/* Polymorphic vrshrq_x (predicated rounding shift right by immediate):
   dispatches on p1's vector type; immediate p2 and predicate p3 are
   passed through unchanged.  */
40464 #define __arm_vrshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40465 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40466 int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40467 int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40468 int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40469 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40470 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40471 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40472
/* Polymorphic vshllbq_x (predicated widening shift-left of the bottom
   half): widening operations only accept 8- and 16-bit source element
   types, hence no 32-bit cases.  */
40473 #define __arm_vshllbq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40474 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40475 int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40476 int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40477 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40478 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
40479
/* Polymorphic vshlltq_x: as __arm_vshllbq_x above but operating on the
   top half of the source vector.  */
40480 #define __arm_vshlltq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40481 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40482 int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40483 int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40484 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40485 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
40486
/* Polymorphic vshlq_x_n (predicated shift left by immediate): dispatches
   on p1's vector type; immediate p2 and predicate p3 pass through.  */
40487 #define __arm_vshlq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40488 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40489 int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40490 int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40491 int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40492 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40493 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40494 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40495
/* Polymorphic vdwdupq_x / viwdupq_x family (predicated wrapping
   decrementing / incrementing duplicate): the start value p1 may be
   either an integer scalar ("_n" variant, cast to uint32_t) or a
   uint32_t pointer ("_wb" write-back variant, which updates *p1).
   p2 (wrap), p3 (step) and p4 (predicate) pass through unchanged.  */
40496 #define __arm_vdwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
40497 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40498 int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u8 ((uint32_t) __p1, p2, p3, p4), \
40499 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
40500
40501 #define __arm_vdwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
40502 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40503 int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u16 ((uint32_t) __p1, p2, p3, p4), \
40504 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
40505
40506 #define __arm_vdwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
40507 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40508 int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u32 ((uint32_t) __p1, p2, p3, p4), \
40509 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
40510
/* Incrementing counterparts of the three macros above.  */
40511 #define __arm_viwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
40512 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40513 int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u8 ((uint32_t) __p1, p2, p3, p4), \
40514 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
40515
40516 #define __arm_viwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
40517 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40518 int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u16 ((uint32_t) __p1, p2, p3, p4), \
40519 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
40520
40521 #define __arm_viwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
40522 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40523 int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u32 ((uint32_t) __p1, p2, p3, p4), \
40524 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
40525
/* Polymorphic vidupq_x / vddupq_x family (predicated incrementing /
   decrementing duplicate, no wrap): as with vdwdupq_x above, p1 is
   either an integer start value ("_n") or a uint32_t pointer that is
   written back ("_wb"); p2 (step) and p3 (predicate) pass through.  */
40526 #define __arm_vidupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40527 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40528 int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u8 ((uint32_t) __p1, p2, p3), \
40529 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
40530
40531 #define __arm_vddupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40532 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40533 int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u8 ((uint32_t) __p1, p2, p3), \
40534 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
40535
40536 #define __arm_vidupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40537 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40538 int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u16 ((uint32_t) __p1, p2, p3), \
40539 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
40540
40541 #define __arm_vddupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40542 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40543 int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u16 ((uint32_t) __p1, p2, p3), \
40544 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
40545
40546 #define __arm_vidupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40547 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40548 int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u32 ((uint32_t) __p1, p2, p3), \
40549 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
40550
40551 #define __arm_vddupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40552 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40553 int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u32 ((uint32_t) __p1, p2, p3), \
40554 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
40555
/* Polymorphic vshrq_x (predicated shift right by immediate): dispatches
   on p1's vector type; immediate p2 and predicate p3 pass through.  */
40556 #define __arm_vshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40557 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40558 int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40559 int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40560 int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40561 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40562 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40563 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40564
/* Polymorphic vhaddq_x (predicated halving add): p2 may be either an
   integer scalar (matched as __ARM_mve_type_int_n, dispatching to the
   "_n" vector-by-scalar intrinsics) or a vector of the same type as p1
   (vector-by-vector intrinsics).  Predicate p3 passes through.  */
40565 #define __arm_vhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40566 __typeof(p2) __p2 = (p2); \
40567 _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40568 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40569 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40570 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
40571 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
40572 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
40573 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
40574 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40575 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40576 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40577 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40578 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40579 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40580
/* Polymorphic vhcaddq_rot270_x (predicated halving complex add with
   270-degree rotation): signed element types only, per the MVE
   instruction set, hence no unsigned cases.  */
40581 #define __arm_vhcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40582 __typeof(p2) __p2 = (p2); \
40583 _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40584 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40585 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40586 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40587
/* 90-degree-rotation counterpart of the macro above.  */
40588 #define __arm_vhcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40589 __typeof(p2) __p2 = (p2); \
40590 _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40591 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40592 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40593 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40594
/* Polymorphic vhsubq_x (predicated halving subtract): like
   __arm_vhaddq_x, p2 may be a scalar ("_n" variants) or a vector of the
   same element type as p1.  Predicate p3 passes through.  */
40595 #define __arm_vhsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
40596 __typeof(p2) __p2 = (p2); \
40597 _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40598 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40599 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40600 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
40601 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
40602 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
40603 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
40604 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40605 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40606 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40607 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40608 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40609 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40610
/* Polymorphic vclsq_x (predicated count leading sign bits): defined for
   signed element types only.  Predicate p2 passes through.  */
40611 #define __arm_vclsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
40612 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40613 int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
40614 int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
40615 int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
40616
/* Polymorphic vclzq_x (predicated count leading zeros): defined for
   both signed and unsigned element types.  */
40617 #define __arm_vclzq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
40618 _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
40619 int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
40620 int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
40621 int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
40622 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
40623 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
40624 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
40625
/* Polymorphic vadciq (add with carry, carry-in cleared; p2 is the
   carry-out pointer per the ACLE signature — TODO confirm against the
   intrinsic declarations): 32-bit element types only.
   NOTE(review): an identical second definition of this macro appears
   later in this file; the duplicate should be removed.  */
40626 #define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40627 __typeof(p1) __p1 = (p1); \
40628 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40629 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
40630 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40631
/* Polymorphic vstrdq_scatter_base_wb_p (predicated 64-bit scatter store
   with base write-back): dispatches on the value vector p2's type; base
   p0, offset p1 and predicate p3 pass through unchanged.  */
40632 #define __arm_vstrdq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
40633 _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
40634 int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
40635 int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
40636
/* Unpredicated counterpart of the macro above.  */
40637 #define __arm_vstrdq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
40638 _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
40639 int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
40640 int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
40641
/* Polymorphic vldrdq gather loads: dispatch on the pointer type of p0
   (signed vs. unsigned int64_t *) via __ARM_mve_coerce1, which accepts
   const-qualified pointers.  These are plain expressions, not statement
   expressions, so p0/p1 are not copied; p0 appears more than once and
   must not have side effects.  "_z" variants take a predicate p2
   (zeroing of inactive lanes, per ACLE "_z" naming).  */
40642 #define __arm_vldrdq_gather_offset(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
40643 int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_offset_s64 (__ARM_mve_coerce1(p0, int64_t *), p1), \
40644 int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_offset_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1)))
40645
40646 #define __arm_vldrdq_gather_offset_z(p0,p1,p2) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
40647 int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_offset_z_s64 (__ARM_mve_coerce1(p0, int64_t *), p1, p2), \
40648 int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_offset_z_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1, p2)))
40649
40650 #define __arm_vldrdq_gather_shifted_offset(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
40651 int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_s64 (__ARM_mve_coerce1(p0, int64_t *), p1), \
40652 int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1)))
40653
40654 #define __arm_vldrdq_gather_shifted_offset_z(p0,p1,p2) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
40655 int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_s64 (__ARM_mve_coerce1(p0, int64_t *), p1, p2), \
40656 int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_u64 (__ARM_mve_coerce1(p0, uint64_t *), p1, p2)))
40657
/* Polymorphic vadciq_m (predicated, merging form of vadciq): dispatches
   on the (inactive p0, operand p1, operand p2) vector-type triple;
   carry-out pointer p3 and predicate p4 pass through unchanged.  */
40658 #define __arm_vadciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
40659 __typeof(p1) __p1 = (p1); \
40660 __typeof(p2) __p2 = (p2); \
40661 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40662 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
40663 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
40664
/* NOTE(review): token-for-token duplicate of the __arm_vadciq definition
   earlier in this file.  An identical redefinition is permitted by
   C11 6.10.3p2 so no diagnostic is issued, but this copy is redundant
   and should be removed in a cleanup.  */
40665 #define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40666 __typeof(p1) __p1 = (p1); \
40667 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40668 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
40669 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40670
/* Polymorphic vadcq_m (predicated add with carry, carry chained through
   the status register per ACLE — TODO confirm): dispatches on the
   (p0, p1, p2) vector-type triple; p3 (carry pointer) and p4
   (predicate) pass through unchanged.  */
40671 #define __arm_vadcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
40672 __typeof(p1) __p1 = (p1); \
40673 __typeof(p2) __p2 = (p2); \
40674 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40675 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
40676 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
40677
/* Unpredicated counterpart of the macro above.  */
40678 #define __arm_vadcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40679 __typeof(p1) __p1 = (p1); \
40680 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40681 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
40682 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40683
/* Polymorphic vsbciq_m / vsbciq / vsbcq_m / vsbcq (subtract with
   carry/borrow family, mirroring the vadciq/vadcq macros above):
   32-bit element types only; trailing carry-pointer and (for "_m"
   forms) predicate arguments pass through unchanged.  */
40684 #define __arm_vsbciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
40685 __typeof(p1) __p1 = (p1); \
40686 __typeof(p2) __p2 = (p2); \
40687 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40688 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
40689 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
40690
40691 #define __arm_vsbciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40692 __typeof(p1) __p1 = (p1); \
40693 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40694 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
40695 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40696
40697 #define __arm_vsbcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
40698 __typeof(p1) __p1 = (p1); \
40699 __typeof(p2) __p2 = (p2); \
40700 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40701 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
40702 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
40703
40704 #define __arm_vsbcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
40705 __typeof(p1) __p1 = (p1); \
40706 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40707 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
40708 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40709
/* Polymorphic vldrbq_gather_offset_z (predicated byte gather load,
   zeroing inactive lanes): dispatches on the pair (p0 element pointer
   type, p1 offset vector type); the offset vector width selects the
   destination element width.  Note p0 is deliberately NOT copied into a
   __p0 temporary (unlike most macros here), so it is referenced, but
   only evaluated once, inside the selected branch; the pointer is
   coerced to a const-qualified element pointer.  Predicate p2 passes
   through unchanged.  */
40710 #define __arm_vldrbq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
40711 _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
40712 int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_s8 (__ARM_mve_coerce(p0, int8_t const *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
40713 int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_s16 (__ARM_mve_coerce(p0, int8_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
40714 int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_s32 (__ARM_mve_coerce(p0, int8_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
40715 int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_u8 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
40716 int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_u16 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
40717 int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_u32 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
40718
/* Polymorphic vqrdmlahq_m (predicated saturating rounding doubling
   multiply-accumulate with scalar): signed element types only; p2 is
   always a scalar (__ARM_mve_type_int_n), predicate p3 passes through.  */
40719 #define __arm_vqrdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40720 __typeof(p1) __p1 = (p1); \
40721 __typeof(p2) __p2 = (p2); \
40722 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40723 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40724 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40725 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
40726
/* Polymorphic vqrdmlashq_m: same dispatch shape as __arm_vqrdmlahq_m,
   selecting the vqrdmlashq_m_n_* intrinsics.  */
40727 #define __arm_vqrdmlashq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40728 __typeof(p1) __p1 = (p1); \
40729 __typeof(p2) __p2 = (p2); \
40730 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40731 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40732 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40733 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
40734
/* Polymorphic vqdmlashq_m: non-rounding counterpart, same dispatch
   shape, selecting the vqdmlashq_m_n_* intrinsics.  */
40735 #define __arm_vqdmlashq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40736 __typeof(p1) __p1 = (p1); \
40737 __typeof(p2) __p2 = (p2); \
40738 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40739 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40740 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40741 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
40742
/* Polymorphic vqrshlq_m (predicated saturating rounding shift by
   vector): the shift-amount vector p2 is always signed, even for the
   unsigned-value variants; predicate p3 passes through unchanged.  */
40743 #define __arm_vqrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40744 __typeof(p1) __p1 = (p1); \
40745 __typeof(p2) __p2 = (p2); \
40746 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40747 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40748 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40749 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40750 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40751 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40752 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40753
/* vqshlq_m_n overload dispatcher (immediate-shift form): dispatch is on the
   p0/p1 typeids only; the immediate p2 and p3 are passed through uncoerced.  */
40754 #define __arm_vqshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40755   __typeof(p1) __p1 = (p1); \
40756   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40757   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40758   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40759   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40760   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40761   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40762   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40763
/* vqshlq_m overload dispatcher (vector-shift form): s8/s16/s32 and
   u8/u16/u32; the shift-count vector p2 is coerced to a signed vector in
   every case, including the unsigned variants.  */
40764 #define __arm_vqshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40765   __typeof(p1) __p1 = (p1); \
40766   __typeof(p2) __p2 = (p2); \
40767   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40768   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40769   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40770   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40771   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40772   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40773   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40774
/* vrhaddq_m overload dispatcher: all three vector operands share the same
   element type/signedness; dispatches to the s8/s16/s32/u8/u16/u32 variant.  */
40775 #define __arm_vrhaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40776   __typeof(p1) __p1 = (p1); \
40777   __typeof(p2) __p2 = (p2); \
40778   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40779   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40780   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40781   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40782   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40783   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40784   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40785
/* vrmulhq_m overload dispatcher: all three vector operands share the same
   element type/signedness; dispatches to the s8/s16/s32/u8/u16/u32 variant.  */
40786 #define __arm_vrmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40787   __typeof(p1) __p1 = (p1); \
40788   __typeof(p2) __p2 = (p2); \
40789   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40790   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40791   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40792   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40793   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40794   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40795   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40796
/* vrshlq_m overload dispatcher: s8/s16/s32 and u8/u16/u32; as with the other
   vector-shift forms, the shift-count vector p2 is always coerced to a signed
   vector, including for the unsigned variants.  */
40797 #define __arm_vrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40798   __typeof(p1) __p1 = (p1); \
40799   __typeof(p2) __p2 = (p2); \
40800   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40801   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40802   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40803   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40804   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40805   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40806   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40807
/* vrshrq_m overload dispatcher (immediate-shift form): dispatch on p0/p1
   typeids only, mapping to the _n_{s8,s16,s32,u8,u16,u32} variants; the
   immediate p2 and p3 are passed through uncoerced.  */
40808 #define __arm_vrshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40809   __typeof(p1) __p1 = (p1); \
40810   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40811   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40812   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40813   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40814   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrshrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40815   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40816   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40817
/* vshrq_m overload dispatcher (immediate-shift form): dispatch on p0/p1
   typeids only; immediate p2 and p3 pass through uncoerced.  */
40818 #define __arm_vshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40819   __typeof(p1) __p1 = (p1); \
40820   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40821   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40822   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40823   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40824   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vshrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40825   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vshrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40826   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vshrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40827
/* vsliq_m overload dispatcher (immediate form): dispatch on p0/p1 typeids
   only, mapping to the _n_{s8,s16,s32,u8,u16,u32} variants; immediate p2 and
   p3 pass through uncoerced.  */
40828 #define __arm_vsliq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40829   __typeof(p1) __p1 = (p1); \
40830   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40831   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40832   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40833   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40834   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40835   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40836   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40837
/* vqsubq_m overload dispatcher: handles both forms in one _Generic — when p2
   is a plain integer (__ARM_mve_type_int_n) it is coerced to the matching
   scalar and the _n_* variant is called; when p2 is a vector the vector
   variant is called.  Covers s8/s16/s32/u8/u16/u32.  */
40838 #define __arm_vqsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40839   __typeof(p1) __p1 = (p1); \
40840   __typeof(p2) __p2 = (p2); \
40841   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40842   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40843   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40844   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
40845   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
40846   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
40847   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
40848   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40849   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40850   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40851   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
40852   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40853   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40854
/* vqrdmulhq_m overload dispatcher (signed only): selects the vector variant
   when p2 is a vector, or the scalar (_n) variant when p2 is a plain integer
   (coerced to the matching element width).  */
40855 #define __arm_vqrdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40856   __typeof(p1) __p1 = (p1); \
40857   __typeof(p2) __p2 = (p2); \
40858   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40859   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40860   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40861   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40862   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
40863   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
40864   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
40865
/* vqrdmlsdhxq_m overload dispatcher: signed vector operands only; selects
   the s8/s16/s32 variant from the p0/p1/p2 typeids.  */
40866 #define __arm_vqrdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40867   __typeof(p1) __p1 = (p1); \
40868   __typeof(p2) __p2 = (p2); \
40869   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40870   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40871   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40872   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40873
/* vqrdmlsdhq_m overload dispatcher: signed vector operands only; selects
   the s8/s16/s32 variant from the p0/p1/p2 typeids.  */
40874 #define __arm_vqrdmlsdhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40875   __typeof(p1) __p1 = (p1); \
40876   __typeof(p2) __p2 = (p2); \
40877   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40878   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
40879   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40880   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
40881
/* vshllbq_m overload dispatcher (widening): the destination p0 has elements
   twice the width of the source p1 (e.g. int16x8_t <- int8x16_t), as the
   typeid pairs below show; immediate p2 and p3 pass through uncoerced.  */
40882 #define __arm_vshllbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40883   __typeof(p1) __p1 = (p1); \
40884   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40885   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vshllbq_m_n_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40886   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vshllbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40887   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshllbq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40888   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshllbq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
40889
/* vshrntq_m overload dispatcher (narrowing): the destination p0 has elements
   half the width of the source p1 (e.g. int8x16_t <- int16x8_t); immediate
   p2 and p3 pass through uncoerced.  */
40890 #define __arm_vshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40891   __typeof(p1) __p1 = (p1); \
40892   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40893   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40894   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40895   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40896   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40897
/* vshrnbq_m overload dispatcher (narrowing): same dispatch shape as
   vshrntq_m — destination elements are half the width of the source;
   immediate p2 and p3 pass through uncoerced.  */
40898 #define __arm_vshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40899   __typeof(p1) __p1 = (p1); \
40900   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40901   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40902   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40903   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40904   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40905
/* vshlltq_m overload dispatcher (widening): same dispatch shape as
   vshllbq_m — destination elements are twice the width of the source;
   immediate p2 and p3 pass through uncoerced.  */
40906 #define __arm_vshlltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40907   __typeof(p1) __p1 = (p1); \
40908   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40909   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vshlltq_m_n_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
40910   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vshlltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40911   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshlltq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
40912   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshlltq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
40913
/* vrshrntq_m overload dispatcher (narrowing): destination elements are half
   the width of the source; immediate p2 and p3 pass through uncoerced.  */
40914 #define __arm_vrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40915   __typeof(p1) __p1 = (p1); \
40916   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40917   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40918   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40919   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40920   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40921
/* vqshruntq_m overload dispatcher: signed source (p1), unsigned narrowed
   destination (p0) — only s16->u8 and s32->u16 pairings are accepted;
   immediate p2 and p3 pass through uncoerced.  */
40922 #define __arm_vqshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40923   __typeof(p1) __p1 = (p1); \
40924   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40925   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40926   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
40927
/* vqshrunbq_m overload dispatcher: signed source, unsigned narrowed
   destination — s16->u8 and s32->u16 only; immediate p2 and p3 pass
   through uncoerced.  */
40928 #define __arm_vqshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40929   __typeof(p1) __p1 = (p1); \
40930   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40931   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40932   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
40933
/* vqrshrnbq_m overload dispatcher (narrowing): destination elements are half
   the width of the source, same signedness; immediate p2 and p3 pass
   through uncoerced.  */
40934 #define __arm_vqrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40935   __typeof(p1) __p1 = (p1); \
40936   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40937   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40938   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40939   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40940   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40941
/* vqrshrntq_m overload dispatcher (narrowing): destination elements are half
   the width of the source, same signedness; immediate p2 and p3 pass
   through uncoerced.  */
40942 #define __arm_vqrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40943   __typeof(p1) __p1 = (p1); \
40944   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40945   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40946   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40947   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40948   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40949
/* vqrshrunbq_m overload dispatcher: signed source, unsigned narrowed
   destination — s16->u8 and s32->u16 only; immediate p2 and p3 pass
   through uncoerced.  */
40950 #define __arm_vqrshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40951   __typeof(p1) __p1 = (p1); \
40952   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40953   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40954   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
40955
/* vqrshruntq_m overload dispatcher: signed source, unsigned narrowed
   destination — s16->u8 and s32->u16 only; immediate p2 and p3 pass
   through uncoerced.  */
40956 #define __arm_vqrshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40957   __typeof(p1) __p1 = (p1); \
40958   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40959   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40960   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
40961
/* vqshrnbq_m overload dispatcher (narrowing): destination elements are half
   the width of the source, same signedness; immediate p2 and p3 pass
   through uncoerced.  */
40962 #define __arm_vqshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40963   __typeof(p1) __p1 = (p1); \
40964   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40965   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40966   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40967   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40968   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40969
/* vqshrntq_m overload dispatcher (narrowing): destination elements are half
   the width of the source, same signedness; immediate p2 and p3 pass
   through uncoerced.  */
40970 #define __arm_vqshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40971   __typeof(p1) __p1 = (p1); \
40972   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40973   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40974   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40975   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40976   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40977
/* vrshrnbq_m overload dispatcher (narrowing): destination elements are half
   the width of the source, same signedness; immediate p2 and p3 pass
   through uncoerced.  */
40978 #define __arm_vrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40979   __typeof(p1) __p1 = (p1); \
40980   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
40981   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
40982   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
40983   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
40984   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
40985
/* vmlaldavaq_p overload dispatcher: p0 is a scalar accumulator
   (__ARM_mve_type_int_n) coerced to int64_t/uint64_t to match the
   signedness of the 16/32-bit vector operands p1/p2; p3 is forwarded
   unchanged.  */
40986 #define __arm_vmlaldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40987   __typeof(p1) __p1 = (p1); \
40988   __typeof(p2) __p2 = (p2); \
40989   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40990   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_p_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
40991   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
40992   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_p_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
40993   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
40994
/* vmlaldavaxq_p overload dispatcher: signed-only variant of the pattern
   above — scalar accumulator p0 coerced to int64_t, vector operands p1/p2
   select the s16/s32 implementation.  */
40995 #define __arm_vmlaldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
40996   __typeof(p1) __p1 = (p1); \
40997   __typeof(p2) __p2 = (p2); \
40998   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
40999   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_p_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41000   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41001
41002 #define __arm_vmlsldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41003 __typeof(p1) __p1 = (p1); \
41004 __typeof(p2) __p2 = (p2); \
41005 _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41006 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41007 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41008
41009 #define __arm_vmlsldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41010 __typeof(p1) __p1 = (p1); \
41011 __typeof(p2) __p2 = (p2); \
41012 _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41013 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41014 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41015
41016 #define __arm_vrmlaldavhaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41017 __typeof(p1) __p1 = (p1); \
41018 __typeof(p2) __p2 = (p2); \
41019 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41020 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41021 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41022
41023 #define __arm_vrmlaldavhaxq_p(p0,p1,p2,p3) __arm_vrmlaldavhaxq_p_s32(p0,p1,p2,p3)
41024
41025 #define __arm_vrmlsldavhaq_p(p0,p1,p2,p3) __arm_vrmlsldavhaq_p_s32(p0,p1,p2,p3)
41026
41027 #define __arm_vrmlsldavhaxq_p(p0,p1,p2,p3) __arm_vrmlsldavhaxq_p_s32(p0,p1,p2,p3)
41028
/* Polymorphic vqdmladhq_m: signed-only (s8/s16/s32) dispatch on the three
   same-typed vector operands.  */
#define __arm_vqdmladhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Polymorphic vqdmladhxq_m: exchanged variant, same dispatch shape.  */
#define __arm_vqdmladhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Polymorphic vqdmlsdhq_m: subtracting counterpart, same dispatch shape.  */
#define __arm_vqdmlsdhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Polymorphic vqdmlsdhxq_m: exchanged subtracting variant.  */
#define __arm_vqdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41060
/* Polymorphic vqabsq_m: signed-only dispatch on (inactive, source) vector
   types; p2 is the predicate.  */
#define __arm_vqabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* Polymorphic vmvnq_m: vector/vector forms for all six integer element
   types, plus the _n immediate forms (16/32-bit only) selected when p1
   matches __ARM_mve_type_int_n.  */
#define __arm_vmvnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmvnq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmvnq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmvnq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmvnq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmvnq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmvnq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce1(__p1, int) , p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce1(__p1, int) , p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce1(__p1, int) , p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce1(__p1, int) , p2));})
41081
/* Polymorphic vorrq_m_n: dispatch on the destination vector type only;
   p1 (immediate) and p2 (predicate) are passed through unchanged.  */
#define __arm_vorrq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vorrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vorrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

/* Polymorphic vqshrunbq: unsigned narrow destination paired with a signed
   wide source (u8<-s16, u16<-s32); p2 is the shift immediate.  */
#define __arm_vqshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

/* Polymorphic vqshluq_m: unsigned destination with signed source of the
   same element width; p2 is the shift immediate, p3 the predicate.  */
#define __arm_vqshluq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshluq_m_n_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshluq_m_n_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshluq_m_n_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
41101
/* Polymorphic vshlq_m: vector-by-vector shift.  Note the shift-count
   vector (p2) is always a signed vector, even for unsigned data.  */
#define __arm_vshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Polymorphic vshlq_m_n: shift by immediate p2, predicate p3.  */
#define __arm_vshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

/* Polymorphic vshlq_m_r: shift by scalar register p1, predicate p2;
   dispatch on the single vector operand.  */
#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
41131
/* Polymorphic vsriq_m: shift-right-insert by immediate p2 for all six
   integer element types; p3 is the predicate.  */
#define __arm_vsriq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})

/* Polymorphic vhaddq_m: when p2 matches __ARM_mve_type_int_n the scalar
   _n variants are selected (coercing p2 to the matching element type);
   otherwise the vector/vector variants are used.  */
#define __arm_vhaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41158
/* Polymorphic vhcaddq_rot270_m: signed-only (s8/s16/s32) dispatch.  */
#define __arm_vhcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Polymorphic vhcaddq_rot90_m: signed-only dispatch, 90-degree rotation.  */
#define __arm_vhcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})

/* Polymorphic vhsubq_m: vector/vector variants listed first, then the
   scalar _n variants selected when p2 matches __ARM_mve_type_int_n.  */
#define __arm_vhsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
41191
/* Polymorphic vmaxq_m: vector/vector dispatch over all six integer
   element types.  */
#define __arm_vmaxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Polymorphic vminq_m: mirror of vmaxq_m for the minimum operation.  */
#define __arm_vminq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41213
/* Polymorphic vmlaq_m: scalar-only (_n) forms; p2 matches
   __ARM_mve_type_int_n and is coerced to the element type of p0/p1.  */
#define __arm_vmlaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})

/* Polymorphic vmlasq_m: same scalar-only dispatch shape as vmlaq_m.  */
#define __arm_vmlasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
41235
41236 #define __arm_vmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41237 __typeof(p1) __p1 = (p1); \
41238 __typeof(p2) __p2 = (p2); \
41239 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41240 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41241 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41242 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41243 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41244 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41245 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41246
/* Polymorphic overload: _Generic on the MVE typeids of the three vector
   arguments selects the typed __arm_vmullbq_int_m_{s8,s16,s32,u8,u16,u32}
   intrinsic.  The first argument uses the double-width element type
   (e.g. int16x8_t for the s8 case); the last argument is forwarded as-is.  */
41247 #define __arm_vmullbq_int_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41248 __typeof(p1) __p1 = (p1); \
41249 __typeof(p2) __p2 = (p2); \
41250 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41251 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41252 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41253 int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41254 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41255 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41256 int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_m_u32 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41257
/* Polymorphic overload: same dispatch scheme as __arm_vmullbq_int_m —
   _Generic on the three operand typeids selects the matching
   __arm_vmulltq_int_m_* intrinsic with a double-width first argument.  */
41258 #define __arm_vmulltq_int_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41259 __typeof(p1) __p1 = (p1); \
41260 __typeof(p2) __p2 = (p2); \
41261 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41262 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41263 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41264 int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41265 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41266 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41267 int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_m_u32 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41268
/* Polymorphic overload: only the two polynomial widths exist — u8 operands
   select _p8 (uint16x8_t first argument), u16 operands select _p16
   (uint32x4_t first argument).  */
41269 #define __arm_vmulltq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41270 __typeof(p1) __p1 = (p1); \
41271 __typeof(p2) __p2 = (p2); \
41272 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41273 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41274 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
41275
/* Polymorphic overload: when p2's typeid is __ARM_mve_type_int_n (a plain
   C integer) the scalar _n_ variants are chosen, with p2 coerced to the
   element type; otherwise the vector-vector variants are chosen.  */
41276 #define __arm_vqaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41277 __typeof(p1) __p1 = (p1); \
41278 __typeof(p2) __p2 = (p2); \
41279 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41280 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
41281 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
41282 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
41283 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
41284 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
41285 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
41286 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41287 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41288 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41289 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41290 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41291 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41292
/* Polymorphic overload: signed-only; p2 must be a plain integer (int_n)
   and is coerced to the element width of the vector operands.  */
41293 #define __arm_vqdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41294 __typeof(p1) __p1 = (p1); \
41295 __typeof(p2) __p2 = (p2); \
41296 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41297 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
41298 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
41299 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
41300
/* Polymorphic overload: signed-only; dispatches either to the scalar _n_
   variants (p2 typeid int_n, coerced to element width) or to the
   vector-vector variants.  */
41301 #define __arm_vqdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41302 __typeof(p1) __p1 = (p1); \
41303 __typeof(p2) __p2 = (p2); \
41304 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41305 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
41306 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
41307 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
41308 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41309 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41310 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41311
/* Polymorphic overload: widening — s16 operands pair with an int32x4_t
   first argument, s32 with int64x2_t.  Vector-vector cases are listed
   first, then the scalar _n_ cases (p2 typeid int_n).  */
41312 #define __arm_vqdmullbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41313 __typeof(p1) __p1 = (p1); \
41314 __typeof(p2) __p2 = (p2); \
41315 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41316 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41317 int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41318 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
41319 int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3));})
41320
/* Polymorphic overload: widening, same pairing as __arm_vqdmullbq_m.
   (Here the scalar _n_ cases are listed before the vector cases; order
   of _Generic associations has no semantic effect.)  */
41321 #define __arm_vqdmulltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41322 __typeof(p1) __p1 = (p1); \
41323 __typeof(p2) __p2 = (p2); \
41324 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41325 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
41326 int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
41327 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41328 int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41329
/* Polymorphic overload: signed vector-vector only; all three operands
   share one element type, dispatching to _s8/_s16/_s32.  */
41330 #define __arm_vqrdmladhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41331 __typeof(p1) __p1 = (p1); \
41332 __typeof(p2) __p2 = (p2); \
41333 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41334 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41335 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41336 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41337
/* Polymorphic overload: identical dispatch structure to
   __arm_vqrdmladhq_m, selecting the _x ("exchange" per the intrinsic
   naming -- confirm against the MVE reference) variants.  */
41338 #define __arm_vqrdmladhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41339 __typeof(p1) __p1 = (p1); \
41340 __typeof(p2) __p2 = (p2); \
41341 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41342 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41343 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41344 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41345
/* Polymorphic overload: dispatches only on p1/p2 typeids; the accumulator
   p0 is forwarded without coercion.  */
41346 #define __arm_vmlsdavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41347 __typeof(p1) __p1 = (p1); \
41348 __typeof(p2) __p2 = (p2); \
41349 _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41350 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaxq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41351 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41352 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41353
/* Polymorphic overload: same shape as __arm_vmlsdavaxq_p — dispatch on
   p1/p2 typeids, p0 passed through uncoerced.  */
41354 #define __arm_vmlsdavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41355 __typeof(p1) __p1 = (p1); \
41356 __typeof(p2) __p2 = (p2); \
41357 _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41358 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaq_p_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41359 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41360 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41361
/* Polymorphic overload: unlike the vmlsdav* wrappers above, this one
   includes p0 in the dispatch (typeid int_n) and coerces it to int32_t.  */
41362 #define __arm_vmladavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41363 __typeof(p1) __p1 = (p1); \
41364 __typeof(p2) __p2 = (p2); \
41365 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41366 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41367 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41368 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
41369
/* Polymorphic overload: polynomial widening, mirroring
   __arm_vmulltq_poly_m — u8 operands select _p8, u16 select _p16.  */
41370 #define __arm_vmullbq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41371 __typeof(p1) __p1 = (p1); \
41372 __typeof(p2) __p2 = (p2); \
41373 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41374 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41375 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
41376
/* Polymorphic overload: dispatches on (pointer type, offset-vector type).
   Note p0 is used directly in __ARM_mve_typeid and __ARM_mve_coerce —
   it is not copied into a __typeof temporary like the other wrappers,
   so p0 is evaluated more than once by the expansion.  */
41377 #define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
41378 _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
41379 int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8 (__ARM_mve_coerce(p0, int8_t const *), __ARM_mve_coerce(__p1, uint8x16_t)), \
41380 int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16 (__ARM_mve_coerce(p0, int8_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
41381 int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32 (__ARM_mve_coerce(p0, int8_t const *), __ARM_mve_coerce(__p1, uint32x4_t)), \
41382 int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint8x16_t)), \
41383 int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint16x8_t)), \
41384 int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32 (__ARM_mve_coerce(p0, uint8_t const *), __ARM_mve_coerce(__p1, uint32x4_t)));})
41385
/* Polymorphic overload: an integer p1 (typeid int_n, cast to uint32_t)
   selects the _n variants; a uint32_t * p1 selects the _wb variants.  */
41386 #define __arm_vidupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41387 __typeof(p1) __p1 = (p1); \
41388 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41389 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3), \
41390 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3), \
41391 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3), \
41392 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
41393 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
41394 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
41395
/* Polymorphic overload: same dispatch scheme as __arm_vidupq_m — integer
   start value (cast to uint32_t) -> _n variants; uint32_t * -> _wb.  */
41396 #define __arm_vddupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41397 __typeof(p1) __p1 = (p1); \
41398 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41399 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3), \
41400 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3), \
41401 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3), \
41402 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
41403 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3), \
41404 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
41405
/* Polymorphic overload on p0 only: integer start -> _n_u16 (cast to
   uint32_t); uint32_t * -> _wb_u16.  */
41406 #define __arm_vidupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41407 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41408 int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u16 ((uint32_t) __p0, p1), \
41409 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
41410
/* Polymorphic overload on p0 only: integer start -> _n_u32 (cast to
   uint32_t); uint32_t * -> _wb_u32.  */
41411 #define __arm_vidupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41412 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41413 int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u32 ((uint32_t) __p0, p1), \
41414 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
41415
/* Polymorphic overload on p0 only: integer start -> _n_u8 (cast to
   uint32_t); uint32_t * -> _wb_u8.  */
41416 #define __arm_vidupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41417 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41418 int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u8 ((uint32_t) __p0, p1), \
41419 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
41420
/* Polymorphic overload on p0 only: integer start -> _n_u16 (cast to
   uint32_t); uint32_t * -> _wb_u16.  */
41421 #define __arm_vddupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41422 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41423 int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u16 ((uint32_t) __p0, p1), \
41424 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
41425
/* Polymorphic overload on p0 only: integer start -> _n_u32 (cast to
   uint32_t); uint32_t * -> _wb_u32.  */
41426 #define __arm_vddupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41427 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41428 int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u32 ((uint32_t) __p0, p1), \
41429 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
41430
/* Polymorphic overload on p0 only: integer start -> _n_u8 (cast to
   uint32_t); uint32_t * -> _wb_u8.  */
41431 #define __arm_vddupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41432 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41433 int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u8 ((uint32_t) __p0, p1), \
41434 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1));})
41435
/* Polymorphic overload: integer p1 -> _n variants, uint32_t * p1 -> _wb.
   NOTE(review): the int_n cases use __ARM_mve_coerce(__p1, uint32_t),
   unlike __arm_vidupq_m above which uses a plain (uint32_t) cast; confirm
   the coerce accepts all plain-integer start values (e.g. literals).  */
41436 #define __arm_viwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
41437 __typeof(p1) __p1 = (p1); \
41438 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41439 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
41440 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
41441 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
41442 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
41443 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
41444 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
41445
/* Polymorphic overload on p0 only: integer -> _n_u16 (via coerce, not a
   cast as in __arm_vidupq_u16); uint32_t * -> _wb_u16.  */
41446 #define __arm_viwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41447 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41448 int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
41449 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
41450
/* Polymorphic overload on p0 only: integer -> _n_u32 (via coerce);
   uint32_t * -> _wb_u32.  */
41451 #define __arm_viwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41452 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41453 int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
41454 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
41455
/* Polymorphic overload on p0 only: integer -> _n_u8 (via coerce);
   uint32_t * -> _wb_u8.  */
41456 #define __arm_viwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41457 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41458 int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
41459 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
41460
/* Polymorphic overload: integer p1 -> _n variants, uint32_t * p1 -> _wb.
   NOTE(review): like __arm_viwdupq_m, the int_n cases coerce p1 to
   uint32_t rather than casting as __arm_vidupq_m does — confirm the
   coerce accepts plain integer start values.  */
41461 #define __arm_vdwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
41462 __typeof(p1) __p1 = (p1); \
41463 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41464 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
41465 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
41466 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
41467 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
41468 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4), \
41469 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
41470
/* Polymorphic overload on p0 only: integer -> _n_u16 (via coerce);
   uint32_t * -> _wb_u16.  */
41471 #define __arm_vdwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41472 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41473 int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u16 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
41474 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u16 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
41475
/* Polymorphic overload on p0 only: integer -> _n_u32 (via coerce);
   uint32_t * -> _wb_u32.  */
41476 #define __arm_vdwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41477 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41478 int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u32 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
41479 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u32 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
41480
/* Polymorphic overload on p0 only: integer -> _n_u8 (via coerce);
   uint32_t * -> _wb_u8.  */
41481 #define __arm_vdwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41482 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41483 int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
41484 int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
41485
/* Polymorphic overload: dispatches on p0's vector typeid alone;
   p1, p2 and p3 are forwarded unchanged to the typed intrinsic.  */
41486 #define __arm_vshlcq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41487 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41488 int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2, p3), \
41489 int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2, p3), \
41490 int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2, p3), \
41491 int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2, p3), \
41492 int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2, p3), \
41493 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2, p3));})
41494
/* Polymorphic overload: dispatches on the p1/p2 vector typeids; the
   accumulator p0 is forwarded without coercion.  */
41495 #define __arm_vabavq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41496 __typeof(p1) __p1 = (p1); \
41497 __typeof(p2) __p2 = (p2); \
41498 _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41499 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
41500 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41501 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
41502 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
41503 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
41504 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
41505
/* Polymorphic overload: predicated form of __arm_vabavq — same p1/p2
   dispatch, p0 forwarded uncoerced, p3 forwarded unchanged.  */
41506 #define __arm_vabavq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41507 __typeof(p1) __p1 = (p1); \
41508 __typeof(p2) __p2 = (p2); \
41509 _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41510 int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_p_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41511 int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41512 int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41513 int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_p_u8(__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41514 int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_p_u16(__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41515 int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_p_u32(__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41516
/* Polymorphic overload: 32-bit vectors only; the scalar accumulator p0
   is coerced to the matching 64-bit type (int64_t / uint64_t).  */
41517 #define __arm_vaddlvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41518 __typeof(p1) __p1 = (p1); \
41519 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41520 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t)), \
41521 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
41522
/* Polymorphic overload: predicated form of __arm_vaddlvaq — same 64-bit
   accumulator coercion, p2 forwarded unchanged.  */
41523 #define __arm_vaddlvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41524 __typeof(p1) __p1 = (p1); \
41525 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41526 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_p_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
41527 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_p_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41528
/* Polymorphic overload: single vector argument, 32-bit element types
   only (s32 / u32).  */
41529 #define __arm_vaddlvq(p0) ({ __typeof(p0) __p0 = (p0); \
41530 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41531 int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
41532 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
41533
/* Polymorphic overload: predicated form of __arm_vaddlvq; p1 is
   forwarded unchanged.  */
41534 #define __arm_vaddlvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41535 _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41536 int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
41537 int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
41538
/* Polymorphic overload: the scalar accumulator p0 (typeid int_n) is
   coerced to int32_t for signed vectors and uint32_t for unsigned ones;
   dispatch covers all six element widths/signs of p1.  */
41539 #define __arm_vaddvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41540 __typeof(p1) __p1 = (p1); \
41541 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41542 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t)), \
41543 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t)), \
41544 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t)), \
41545 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
41546 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
41547 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
41548
/* Polymorphic overload: predicated form of __arm_vaddvaq — same
   accumulator coercion scheme, p2 forwarded unchanged.  */
41549 #define __arm_vaddvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41550 __typeof(p1) __p1 = (p1); \
41551 _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41552 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
41553 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41554 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
41555 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
41556 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41557 int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41558
/* vaddvq: add-across-vector reduction to a scalar.  One _Generic case per
   supported element type (s8/s16/s32/u8/u16/u32).  */
41559 #define __arm_vaddvq(p0) ({ __typeof(p0) __p0 = (p0); \
41560   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41561   int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
41562   int (*)[__ARM_mve_type_int16x8_t]: __arm_vaddvq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
41563   int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddvq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
41564   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vaddvq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
41565   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
41566   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
41567 
/* vaddvq_p: predicated add-across-vector; p1 is the predicate.  */
41568 #define __arm_vaddvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41569   _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
41570   int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
41571   int (*)[__ARM_mve_type_int16x8_t]: __arm_vaddvq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
41572   int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
41573   int (*)[__ARM_mve_type_uint8x16_t]: __arm_vaddvq_p_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
41574   int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
41575   int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
41576
/* vcmpcsq: unsigned compare higher-or-same (>=).  Vector/vector cases come
   first; vector/scalar (_n) cases coerce __p1 to the matching element type.
   Unsigned types only -- there is no signed vcmpcs.  */
41577 #define __arm_vcmpcsq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41578   __typeof(p1) __p1 = (p1); \
41579   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41580   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
41581   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
41582   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
41583   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
41584   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
41585   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
41586 
/* vcmpcsq_m: merging-predicated form of vcmpcsq; p2 is the predicate.  */
41587 #define __arm_vcmpcsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41588   __typeof(p1) __p1 = (p1); \
41589   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41590   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
41591   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41592   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
41593   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
41594   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
41595   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));})
41596
/* vcmphiq: unsigned compare higher (>).  Same dispatch shape as vcmpcsq:
   vector/vector cases then vector/scalar (_n) cases, unsigned only.  */
41597 #define __arm_vcmphiq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41598   __typeof(p1) __p1 = (p1); \
41599   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41600   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
41601   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
41602   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
41603   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
41604   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
41605   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
41606 
/* vcmphiq_m: merging-predicated vcmphiq.  Note the _n (scalar) cases are
   listed before the vector/vector cases here, the reverse of vcmphiq above;
   _Generic selection is unordered so this is cosmetic only.  */
41607 #define __arm_vcmphiq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41608   __typeof(p1) __p1 = (p1); \
41609   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41610   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
41611   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
41612   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
41613   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
41614   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41615   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41616
/* vmaxavq: max of absolute values across vector, merged with scalar __p0.
   Signed element types only; __p0 is passed through uncoerced.  */
41617 #define __arm_vmaxavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41618   __typeof(p1) __p1 = (p1); \
41619   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41620   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \
41621   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \
41622   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)));})
41623 
/* vmaxavq_p: predicated vmaxavq; p2 is the predicate.  */
41624 #define __arm_vmaxavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41625   __typeof(p1) __p1 = (p1); \
41626   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41627   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxavq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \
41628   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \
41629   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2));})
41630 
/* vmaxq_x: element-wise max with "don't care" (_x) predication; operands
   start at p1 to match the other _x intrinsics' parameter numbering.  */
41631 #define __arm_vmaxq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
41632   __typeof(p2) __p2 = (p2); \
41633   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41634   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41635   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41636   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41637   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41638   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41639   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41640
/* vmaxvq: max-across-vector merged with scalar __p0 (all six integer element
   types); __p0 passed through uncoerced.  */
41641 #define __arm_vmaxvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41642   __typeof(p1) __p1 = (p1); \
41643   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41644   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxvq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \
41645   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxvq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \
41646   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxvq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)), \
41647   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vmaxvq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t)), \
41648   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t)), \
41649   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_u32 (__p0,__ARM_mve_coerce(__p1, uint32x4_t)));})
41650 
/* vmaxvq_p: predicated vmaxvq; p2 is the predicate.  */
41651 #define __arm_vmaxvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41652   __typeof(p1) __p1 = (p1); \
41653   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41654   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxvq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \
41655   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxvq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \
41656   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxvq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2), \
41657   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vmaxvq_p_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), p2), \
41658   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_p_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41659   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_p_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41660
/* vminavq: min of absolute values across vector, merged with scalar __p0.
   Signed element types only, mirroring vmaxavq above.  */
41661 #define __arm_vminavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41662   __typeof(p1) __p1 = (p1); \
41663   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41664   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \
41665   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \
41666   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)));})
41667 
/* vminavq_p: predicated vminavq; p2 is the predicate.  */
41668 #define __arm_vminavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41669   __typeof(p1) __p1 = (p1); \
41670   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41671   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminavq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \
41672   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminavq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \
41673   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminavq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2));})
41674 
/* vminq_x: element-wise min with "don't care" (_x) predication; operand
   numbering starts at p1, as with vmaxq_x.  */
41675 #define __arm_vminq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
41676   __typeof(p2) __p2 = (p2); \
41677   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41678   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41679   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41680   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41681   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41682   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41683   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41684
/* vminvq: min-across-vector merged with scalar __p0 (all six integer
   element types).  */
41685 #define __arm_vminvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41686   __typeof(p1) __p1 = (p1); \
41687   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41688   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminvq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \
41689   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminvq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \
41690   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminvq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)), \
41691   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vminvq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t)), \
41692   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vminvq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t)), \
41693   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vminvq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t)));})
41694 
/* vminvq_p: predicated vminvq; p2 is the predicate.  */
41695 #define __arm_vminvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41696   __typeof(p1) __p1 = (p1); \
41697   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41698   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminvq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \
41699   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminvq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \
41700   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminvq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2), \
41701   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vminvq_p_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), p2), \
41702   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vminvq_p_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41703   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vminvq_p_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41704
/* vmladavaq: multiply-add-across-vector with scalar accumulator __p0.
   Three-way dispatch on (accumulator, vector, vector) type ids; the
   accumulator is coerced to int32_t/uint32_t per element signedness.  */
41705 #define __arm_vmladavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41706   __typeof(p1) __p1 = (p1); \
41707   __typeof(p2) __p2 = (p2); \
41708   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41709   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
41710   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41711   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
41712   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
41713   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
41714   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
41715 
/* vmladavaq_p: predicated vmladavaq; p3 is the predicate.  */
41716 #define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
41717   __typeof(p1) __p1 = (p1); \
41718   __typeof(p2) __p2 = (p2); \
41719   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41720   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
41721   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
41722   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
41723   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
41724   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
41725   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41726 
/* vmladavaxq: exchanged-operand (x) multiply-add-across with accumulator;
   same dispatch shape as vmladavaq.  */
41727 #define __arm_vmladavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41728   __typeof(p1) __p1 = (p1); \
41729   __typeof(p2) __p2 = (p2); \
41730   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41731   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
41732   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41733   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
41734   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaxq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
41735   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaxq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
41736   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaxq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
41737
/* vmladavq: multiply-add-across-vector of two vectors (no accumulator
   argument); dispatch on the pair of vector type ids.  */
41738 #define __arm_vmladavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41739   __typeof(p1) __p1 = (p1); \
41740   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41741   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
41742   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
41743   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
41744   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
41745   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
41746   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
41747 
/* vmladavq_p: predicated vmladavq; p2 is the predicate.  */
41748 #define __arm_vmladavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41749   __typeof(p1) __p1 = (p1); \
41750   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41751   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
41752   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41753   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
41754   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavq_p_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
41755   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41756   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41757 
/* vmladavxq: exchanged-operand multiply-add-across of two vectors.  */
41758 #define __arm_vmladavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41759   __typeof(p1) __p1 = (p1); \
41760   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41761   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
41762   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
41763   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
41764   int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
41765   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
41766   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
41767 
/* vmladavxq_p: predicated vmladavxq.  NOTE(review): only signed cases are
   listed here, while unpredicated vmladavxq above also has unsigned cases --
   appears intentional (no predicated unsigned builtin), but worth confirming
   against the MVE intrinsics specification.  */
41768 #define __arm_vmladavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41769   __typeof(p1) __p1 = (p1); \
41770   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41771   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
41772   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41773   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
41774
/* vmlaldavaq: multiply-add-long-across-vector with a 64-bit scalar
   accumulator; only 16- and 32-bit element types exist for the long form.  */
41775 #define __arm_vmlaldavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41776   __typeof(p1) __p1 = (p1); \
41777   __typeof(p2) __p2 = (p2); \
41778   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41779   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41780   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
41781   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_u16 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
41782   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
41783 
/* vmlaldavaxq: exchanged-operand long multiply-add-across with 64-bit
   accumulator; signed element types only.  */
41784 #define __arm_vmlaldavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41785   __typeof(p1) __p1 = (p1); \
41786   __typeof(p2) __p2 = (p2); \
41787   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41788   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_s16 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41789   int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
41790
/* vmlaldavq: multiply-add-long-across-vector of two vectors (64-bit
   result); 16/32-bit signed and unsigned element types.  */
41791 #define __arm_vmlaldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41792   __typeof(p1) __p1 = (p1); \
41793   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41794   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
41795   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
41796   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
41797   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
41798 
/* vmlaldavq_p: predicated vmlaldavq; p2 is the predicate.  */
41799 #define __arm_vmlaldavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41800   __typeof(p1) __p1 = (p1); \
41801   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41802   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41803   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
41804   int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
41805   int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41806 
/* vmlaldavxq_p: predicated exchanged-operand long multiply-add-across;
   signed element types only.  */
41807 #define __arm_vmlaldavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41808   __typeof(p1) __p1 = (p1); \
41809   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41810   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41811   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
41812
/* vmlsdavaq: multiply-subtract-across-vector with scalar accumulator;
   signed types only.  NOTE(review): dispatch ignores __p0's typeid and the
   accumulator is passed uncoerced, unlike vmladavaq -- matches upstream but
   inconsistent with the sibling macros.  */
41813 #define __arm_vmlsdavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41814   __typeof(p1) __p1 = (p1); \
41815   __typeof(p2) __p2 = (p2); \
41816   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41817   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaq_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
41818   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41819   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
41820 
/* vmlsdavaxq: exchanged-operand multiply-subtract-across with accumulator;
   same dispatch shape (accumulator __p0 passed uncoerced).  */
41821 #define __arm_vmlsdavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41822   __typeof(p1) __p1 = (p1); \
41823   __typeof(p2) __p2 = (p2); \
41824   _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
41825   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaxq_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
41826   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
41827   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
41828
/* vmlsdavq: multiply-subtract-across-vector of two vectors; signed element
   types only (no unsigned vmlsdav builtin).  */
41829 #define __arm_vmlsdavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41830   __typeof(p1) __p1 = (p1); \
41831   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41832   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
41833   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
41834   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
41835 
/* vmlsdavq_p: predicated vmlsdavq; p2 is the predicate.  */
41836 #define __arm_vmlsdavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41837   __typeof(p1) __p1 = (p1); \
41838   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41839   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
41840   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41841   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
41842
/* vmlsdavxq: exchanged-operand multiply-subtract-across of two vectors;
   signed element types only.  */
41843 #define __arm_vmlsdavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
41844   __typeof(p1) __p1 = (p1); \
41845   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41846   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
41847   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
41848   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
41849 
/* vmlsdavxq_p: predicated vmlsdavxq; p2 is the predicate.  */
41850 #define __arm_vmlsdavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
41851   __typeof(p1) __p1 = (p1); \
41852   _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
41853   int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
41854   int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
41855   int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
41856
/* Polymorphic vmlsldavaq: p0 is the 64-bit accumulator (passed through
   unchanged), dispatch is on the two 16/32-bit signed vector arguments.  */
#define __arm_vmlsldavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})

/* Exchange variant of the above; same accumulator/dispatch scheme.  */
#define __arm_vmlsldavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
41870
/* Polymorphic vmlsldavq: dispatch on the two 16/32-bit signed vector
   argument types (long, 64-bit accumulating form).  */
#define __arm_vmlsldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})

/* Predicated form of the above; p2 is the predicate mask.  */
#define __arm_vmlsldavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
41882
/* Polymorphic vmlsldavxq (long, exchange variant): dispatch on the two
   16/32-bit signed vector argument types.  */
#define __arm_vmlsldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})

/* Predicated form of the above; p2 is the predicate mask.  */
#define __arm_vmlsldavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
41894
/* Polymorphic predicated-x vmovlbq (widen bottom halves): dispatch on the
   8/16-bit signed or unsigned vector argument; p2 is the predicate mask.  */
#define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})

/* As above, but widening the top halves of the input elements.  */
#define __arm_vmovltq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
41908
/* Polymorphic predicated-x vmulhq (multiply returning high half): dispatch
   on the two vector argument types, signed and unsigned 8/16/32-bit.  */
#define __arm_vmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
41918
/* Polymorphic predicated-x vmullbq (widening multiply, bottom halves):
   integer dispatch over signed/unsigned 8/16/32-bit vector pairs.  */
#define __arm_vmullbq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Polynomial variant: dispatch over 8/16-bit poly (uint-typed) vectors.  */
#define __arm_vmullbq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
41934
/* Polymorphic predicated-x vmulltq (widening multiply, top halves):
   integer dispatch over signed/unsigned 8/16/32-bit vector pairs.  */
#define __arm_vmulltq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})

/* Polynomial variant: dispatch over 8/16-bit poly (uint-typed) vectors.  */
#define __arm_vmulltq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
41950
/* Polymorphic vrmlaldavhaq: p0 is a scalar 64-bit accumulator (matched as
   __ARM_mve_type_int_n and coerced to the signedness of the vectors);
   dispatch is on the two 32-bit vector argument types.  */
#define __arm_vrmlaldavhaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})

/* Only a signed 32-bit variant exists, so no _Generic dispatch needed.  */
#define __arm_vrmlaldavhaxq(p0,p1,p2) __arm_vrmlaldavhaxq_s32(p0,p1,p2)
41959
/* Polymorphic vrmlaldavhq: dispatch on the two 32-bit signed/unsigned
   vector argument types.  */
#define __arm_vrmlaldavhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})

/* Predicated form of the above; p2 is the predicate mask.  */
#define __arm_vrmlaldavhq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
41971
/* The following intrinsics each have a single (signed 32-bit) type variant,
   so the polymorphic name maps straight onto it without _Generic.  */
#define __arm_vrmlaldavhxq(p0,p1) __arm_vrmlaldavhxq_s32(p0,p1)

#define __arm_vrmlaldavhxq_p(p0,p1,p2) __arm_vrmlaldavhxq_p_s32(p0,p1,p2)

#define __arm_vrmlsldavhaq(p0,p1,p2) __arm_vrmlsldavhaq_s32(p0,p1,p2)

#define __arm_vrmlsldavhaxq(p0,p1,p2) __arm_vrmlsldavhaxq_s32(p0,p1,p2)

#define __arm_vrmlsldavhq(p0,p1) __arm_vrmlsldavhq_s32(p0,p1)

#define __arm_vrmlsldavhq_p(p0,p1,p2) __arm_vrmlsldavhq_p_s32(p0,p1,p2)

#define __arm_vrmlsldavhxq(p0,p1) __arm_vrmlsldavhxq_s32(p0,p1)

#define __arm_vrmlsldavhxq_p(p0,p1,p2) __arm_vrmlsldavhxq_p_s32(p0,p1,p2)
41987
/* Polymorphic vstrbq (store bytes, narrowing): dispatch on the pointer type
   and the vector element width.  Note p0 is typeid'd directly (not copied)
   so the pointer expression keeps its original type.  */
#define __arm_vstrbq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_s16 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_s32 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_u16 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_u32 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
41996
/* Predicated form of __arm_vstrbq; p2 is the predicate mask.  */
#define __arm_vstrbq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_p_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_p_s16 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_p_s32 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_p_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_p_u16 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_p_u32 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
42005
/* Polymorphic byte scatter-store with per-lane offsets: dispatch on the
   base pointer type, the (unsigned) offset vector and the data vector.  */
#define __arm_vstrbq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_s16 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_s32 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_u16 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_u32 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})


/* Predicated form of the above; p3 is the predicate mask.  */
#define __arm_vstrbq_scatter_offset_p(p0,p1,p2,p3) ({__typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_p_s8 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_p_s16 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
  int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_p_s32 (__ARM_mve_coerce(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_p_u8 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_p_u16 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
  int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_p_u32 (__ARM_mve_coerce(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
42026
/* Polymorphic 64-bit scatter-store against a vector of base addresses (p0)
   plus immediate offset (p1): dispatch on the data vector type only.  */
#define __arm_vstrdq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})

/* Predicated form of the above; p3 is the predicate mask.  */
#define __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
42036
/* Polymorphic 64-bit scatter-store with per-lane byte offsets (p1):
   dispatch on the base pointer type and the data vector type.  */
#define __arm_vstrdq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_s64 (__ARM_mve_coerce(p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_u64 (__ARM_mve_coerce(p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t)));})

/* Predicated form of the above; p3 is the predicate mask.  */
#define __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_p_s64 (__ARM_mve_coerce(p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_p_u64 (__ARM_mve_coerce(p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
42048
/* As __arm_vstrdq_scatter_offset, but lane offsets (p1) are element-scaled
   (shifted) rather than byte offsets.  */
#define __arm_vstrdq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_s64 (__ARM_mve_coerce(p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t)), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_u64 (__ARM_mve_coerce(p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t)));})

/* Predicated form of the above; p3 is the predicate mask.  */
#define __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_s64 (__ARM_mve_coerce(p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
  int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_u64 (__ARM_mve_coerce(p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
42060
42061 #endif /* __cplusplus */
42062 #endif /* __ARM_FEATURE_MVE */
42063 #endif /* _GCC_ARM_MVE_H. */