1//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the auto-upgrade helper functions.
10// This is where deprecated IR intrinsics and other IR features are updated to
11// current specifications.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/IR/AutoUpgrade.h"
16#include "llvm/ADT/StringRef.h"
17#include "llvm/ADT/StringSwitch.h"
20#include "llvm/IR/Constants.h"
21#include "llvm/IR/DebugInfo.h"
24#include "llvm/IR/Function.h"
25#include "llvm/IR/IRBuilder.h"
26#include "llvm/IR/InstVisitor.h"
27#include "llvm/IR/Instruction.h"
28#include "llvm/IR/IntrinsicInst.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/IntrinsicsAArch64.h"
31#include "llvm/IR/IntrinsicsARM.h"
32#include "llvm/IR/IntrinsicsNVPTX.h"
33#include "llvm/IR/IntrinsicsRISCV.h"
34#include "llvm/IR/IntrinsicsWebAssembly.h"
35#include "llvm/IR/IntrinsicsX86.h"
36#include "llvm/IR/LLVMContext.h"
37#include "llvm/IR/Metadata.h"
38#include "llvm/IR/Module.h"
39#include "llvm/IR/Verifier.h"
40#include "llvm/Support/CommandLine.h"
42#include "llvm/Support/Regex.h"
44#include <cstring>
45
46using namespace llvm;
47
48static cl::opt<bool>
49 DisableAutoUpgradeDebugInfo("disable-auto-upgrade-debug-info",
50 cl::desc("Disable autoupgrade of debug info"));
51
52static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
53
54// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
55// changed their type from v4f32 to v2i64.
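// For example (illustrative IR), an old declaration like
//   declare i32 @llvm.x86.sse41.ptestc(<4 x float>, <4 x float>)
// is renamed and redirected to the current form
//   declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>)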
56static bool upgradePTESTIntrinsic(Function *F, Intrinsic::ID IID,
57 Function *&NewFn) {
58 // Check whether this is an old version of the function, which received
59 // v4f32 arguments.
60 Type *Arg0Type = F->getFunctionType()->getParamType(0);
61 if (Arg0Type != FixedVectorType::get(Type::getFloatTy(F->getContext()), 4))
62 return false;
63
64 // Yes, it's old, replace it with new version.
65 rename(F);
66 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
67 return true;
68}
69
70// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
71// arguments have changed their type from i32 to i8.
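// For example (illustrative IR), an old declaration like
//   declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32)
// now maps to the current form taking an i8 immediate:
//   declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8)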
72static bool upgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
73 Function *&NewFn) {
74 // Check that the last argument is an i32.
75 Type *LastArgType = F->getFunctionType()->getParamType(
76 F->getFunctionType()->getNumParams() - 1);
77 if (!LastArgType->isIntegerTy(32))
78 return false;
79
80 // Move this function aside and map down.
81 rename(F);
82 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
83 return true;
84}
85
86// Upgrade the declaration of fp compare intrinsics that change return type
87// from scalar to vXi1 mask.
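// For example, the old llvm.x86.avx512.mask.cmp.ps.512 returned a scalar i16
// bitmask; the current intrinsic returns a <16 x i1> mask vector instead.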
88static bool upgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
89 Function *&NewFn) {
90 // Check if the return type is a vector.
91 if (F->getReturnType()->isVectorTy())
92 return false;
93
94 rename(F);
95 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
96 return true;
97}
98
99static bool upgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID,
100 Function *&NewFn) {
101 if (F->getReturnType()->getScalarType()->isBFloatTy())
102 return false;
103
104 rename(F);
105 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
106 return true;
107}
108
109static bool upgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
110 Function *&NewFn) {
111 if (F->getFunctionType()->getParamType(1)->getScalarType()->isBFloatTy())
112 return false;
113
114 rename(F);
115 NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
116 return true;
117}
118
119static bool shouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
120 // All of the intrinsics matches below should be marked with which llvm
121 // version started autoupgrading them. At some point in the future we would
122 // like to use this information to remove upgrade code for some older
123 // intrinsics. It is currently undecided how we will determine that future
124 // point.
125 if (Name.consume_front("avx."))
126 return (Name.starts_with("blend.p") || // Added in 3.7
127 Name == "cvt.ps2.pd.256" || // Added in 3.9
128 Name == "cvtdq2.pd.256" || // Added in 3.9
129 Name == "cvtdq2.ps.256" || // Added in 7.0
130 Name.starts_with("movnt.") || // Added in 3.2
131 Name.starts_with("sqrt.p") || // Added in 7.0
132 Name.starts_with("storeu.") || // Added in 3.9
133 Name.starts_with("vbroadcast.s") || // Added in 3.5
134 Name.starts_with("vbroadcastf128") || // Added in 4.0
135 Name.starts_with("vextractf128.") || // Added in 3.7
136 Name.starts_with("vinsertf128.") || // Added in 3.7
137 Name.starts_with("vperm2f128.") || // Added in 6.0
138 Name.starts_with("vpermil.")); // Added in 3.1
139
140 if (Name.consume_front("avx2."))
141 return (Name == "movntdqa" || // Added in 5.0
142 Name.starts_with("pabs.") || // Added in 6.0
143 Name.starts_with("padds.") || // Added in 8.0
144 Name.starts_with("paddus.") || // Added in 8.0
145 Name.starts_with("pblendd.") || // Added in 3.7
146 Name == "pblendw" || // Added in 3.7
147 Name.starts_with("pbroadcast") || // Added in 3.8
148 Name.starts_with("pcmpeq.") || // Added in 3.1
149 Name.starts_with("pcmpgt.") || // Added in 3.1
150 Name.starts_with("pmax") || // Added in 3.9
151 Name.starts_with("pmin") || // Added in 3.9
152 Name.starts_with("pmovsx") || // Added in 3.9
153 Name.starts_with("pmovzx") || // Added in 3.9
154 Name == "pmul.dq" || // Added in 7.0
155 Name == "pmulu.dq" || // Added in 7.0
156 Name.starts_with("psll.dq") || // Added in 3.7
157 Name.starts_with("psrl.dq") || // Added in 3.7
158 Name.starts_with("psubs.") || // Added in 8.0
159 Name.starts_with("psubus.") || // Added in 8.0
160 Name.starts_with("vbroadcast") || // Added in 3.8
161 Name == "vbroadcasti128" || // Added in 3.7
162 Name == "vextracti128" || // Added in 3.7
163 Name == "vinserti128" || // Added in 3.7
164 Name == "vperm2i128"); // Added in 6.0
165
166 if (Name.consume_front("avx512.")) {
167 if (Name.consume_front("mask."))
168 // 'avx512.mask.*'
169 return (Name.starts_with("add.p") || // Added in 7.0. 128/256 in 4.0
170 Name.starts_with("and.") || // Added in 3.9
171 Name.starts_with("andn.") || // Added in 3.9
172 Name.starts_with("broadcast.s") || // Added in 3.9
173 Name.starts_with("broadcastf32x4.") || // Added in 6.0
174 Name.starts_with("broadcastf32x8.") || // Added in 6.0
175 Name.starts_with("broadcastf64x2.") || // Added in 6.0
176 Name.starts_with("broadcastf64x4.") || // Added in 6.0
177 Name.starts_with("broadcasti32x4.") || // Added in 6.0
178 Name.starts_with("broadcasti32x8.") || // Added in 6.0
179 Name.starts_with("broadcasti64x2.") || // Added in 6.0
180 Name.starts_with("broadcasti64x4.") || // Added in 6.0
181 Name.starts_with("cmp.b") || // Added in 5.0
182 Name.starts_with("cmp.d") || // Added in 5.0
183 Name.starts_with("cmp.q") || // Added in 5.0
184 Name.starts_with("cmp.w") || // Added in 5.0
185 Name.starts_with("compress.b") || // Added in 9.0
186 Name.starts_with("compress.d") || // Added in 9.0
187 Name.starts_with("compress.p") || // Added in 9.0
188 Name.starts_with("compress.q") || // Added in 9.0
189 Name.starts_with("compress.store.") || // Added in 7.0
190 Name.starts_with("compress.w") || // Added in 9.0
191 Name.starts_with("conflict.") || // Added in 9.0
192 Name.starts_with("cvtdq2pd.") || // Added in 4.0
193 Name.starts_with("cvtdq2ps.") || // Added in 7.0 updated 9.0
194 Name == "cvtpd2dq.256" || // Added in 7.0
195 Name == "cvtpd2ps.256" || // Added in 7.0
196 Name == "cvtps2pd.128" || // Added in 7.0
197 Name == "cvtps2pd.256" || // Added in 7.0
198 Name.starts_with("cvtqq2pd.") || // Added in 7.0 updated 9.0
199 Name == "cvtqq2ps.256" || // Added in 9.0
200 Name == "cvtqq2ps.512" || // Added in 9.0
201 Name == "cvttpd2dq.256" || // Added in 7.0
202 Name == "cvttps2dq.128" || // Added in 7.0
203 Name == "cvttps2dq.256" || // Added in 7.0
204 Name.starts_with("cvtudq2pd.") || // Added in 4.0
205 Name.starts_with("cvtudq2ps.") || // Added in 7.0 updated 9.0
206 Name.starts_with("cvtuqq2pd.") || // Added in 7.0 updated 9.0
207 Name == "cvtuqq2ps.256" || // Added in 9.0
208 Name == "cvtuqq2ps.512" || // Added in 9.0
209 Name.starts_with("dbpsadbw.") || // Added in 7.0
210 Name.starts_with("div.p") || // Added in 7.0. 128/256 in 4.0
211 Name.starts_with("expand.b") || // Added in 9.0
212 Name.starts_with("expand.d") || // Added in 9.0
213 Name.starts_with("expand.load.") || // Added in 7.0
214 Name.starts_with("expand.p") || // Added in 9.0
215 Name.starts_with("expand.q") || // Added in 9.0
216 Name.starts_with("expand.w") || // Added in 9.0
217 Name.starts_with("fpclass.p") || // Added in 7.0
218 Name.starts_with("insert") || // Added in 4.0
219 Name.starts_with("load.") || // Added in 3.9
220 Name.starts_with("loadu.") || // Added in 3.9
221 Name.starts_with("lzcnt.") || // Added in 5.0
222 Name.starts_with("max.p") || // Added in 7.0. 128/256 in 5.0
223 Name.starts_with("min.p") || // Added in 7.0. 128/256 in 5.0
224 Name.starts_with("movddup") || // Added in 3.9
225 Name.starts_with("move.s") || // Added in 4.0
226 Name.starts_with("movshdup") || // Added in 3.9
227 Name.starts_with("movsldup") || // Added in 3.9
228 Name.starts_with("mul.p") || // Added in 7.0. 128/256 in 4.0
229 Name.starts_with("or.") || // Added in 3.9
230 Name.starts_with("pabs.") || // Added in 6.0
231 Name.starts_with("packssdw.") || // Added in 5.0
232 Name.starts_with("packsswb.") || // Added in 5.0
233 Name.starts_with("packusdw.") || // Added in 5.0
234 Name.starts_with("packuswb.") || // Added in 5.0
235 Name.starts_with("padd.") || // Added in 4.0
236 Name.starts_with("padds.") || // Added in 8.0
237 Name.starts_with("paddus.") || // Added in 8.0
238 Name.starts_with("palignr.") || // Added in 3.9
239 Name.starts_with("pand.") || // Added in 3.9
240 Name.starts_with("pandn.") || // Added in 3.9
241 Name.starts_with("pavg") || // Added in 6.0
242 Name.starts_with("pbroadcast") || // Added in 6.0
243 Name.starts_with("pcmpeq.") || // Added in 3.9
244 Name.starts_with("pcmpgt.") || // Added in 3.9
245 Name.starts_with("perm.df.") || // Added in 3.9
246 Name.starts_with("perm.di.") || // Added in 3.9
247 Name.starts_with("permvar.") || // Added in 7.0
248 Name.starts_with("pmaddubs.w.") || // Added in 7.0
249 Name.starts_with("pmaddw.d.") || // Added in 7.0
250 Name.starts_with("pmax") || // Added in 4.0
251 Name.starts_with("pmin") || // Added in 4.0
252 Name == "pmov.qd.256" || // Added in 9.0
253 Name == "pmov.qd.512" || // Added in 9.0
254 Name == "pmov.wb.256" || // Added in 9.0
255 Name == "pmov.wb.512" || // Added in 9.0
256 Name.starts_with("pmovsx") || // Added in 4.0
257 Name.starts_with("pmovzx") || // Added in 4.0
258 Name.starts_with("pmul.dq.") || // Added in 4.0
259 Name.starts_with("pmul.hr.sw.") || // Added in 7.0
260 Name.starts_with("pmulh.w.") || // Added in 7.0
261 Name.starts_with("pmulhu.w.") || // Added in 7.0
262 Name.starts_with("pmull.") || // Added in 4.0
263 Name.starts_with("pmultishift.qb.") || // Added in 8.0
264 Name.starts_with("pmulu.dq.") || // Added in 4.0
265 Name.starts_with("por.") || // Added in 3.9
266 Name.starts_with("prol.") || // Added in 8.0
267 Name.starts_with("prolv.") || // Added in 8.0
268 Name.starts_with("pror.") || // Added in 8.0
269 Name.starts_with("prorv.") || // Added in 8.0
270 Name.starts_with("pshuf.b.") || // Added in 4.0
271 Name.starts_with("pshuf.d.") || // Added in 3.9
272 Name.starts_with("pshufh.w.") || // Added in 3.9
273 Name.starts_with("pshufl.w.") || // Added in 3.9
274 Name.starts_with("psll.d") || // Added in 4.0
275 Name.starts_with("psll.q") || // Added in 4.0
276 Name.starts_with("psll.w") || // Added in 4.0
277 Name.starts_with("pslli") || // Added in 4.0
278 Name.starts_with("psllv") || // Added in 4.0
279 Name.starts_with("psra.d") || // Added in 4.0
280 Name.starts_with("psra.q") || // Added in 4.0
281 Name.starts_with("psra.w") || // Added in 4.0
282 Name.starts_with("psrai") || // Added in 4.0
283 Name.starts_with("psrav") || // Added in 4.0
284 Name.starts_with("psrl.d") || // Added in 4.0
285 Name.starts_with("psrl.q") || // Added in 4.0
286 Name.starts_with("psrl.w") || // Added in 4.0
287 Name.starts_with("psrli") || // Added in 4.0
288 Name.starts_with("psrlv") || // Added in 4.0
289 Name.starts_with("psub.") || // Added in 4.0
290 Name.starts_with("psubs.") || // Added in 8.0
291 Name.starts_with("psubus.") || // Added in 8.0
292 Name.starts_with("pternlog.") || // Added in 7.0
293 Name.starts_with("punpckh") || // Added in 3.9
294 Name.starts_with("punpckl") || // Added in 3.9
295 Name.starts_with("pxor.") || // Added in 3.9
296 Name.starts_with("shuf.f") || // Added in 6.0
297 Name.starts_with("shuf.i") || // Added in 6.0
298 Name.starts_with("shuf.p") || // Added in 4.0
299 Name.starts_with("sqrt.p") || // Added in 7.0
300 Name.starts_with("store.b.") || // Added in 3.9
301 Name.starts_with("store.d.") || // Added in 3.9
302 Name.starts_with("store.p") || // Added in 3.9
303 Name.starts_with("store.q.") || // Added in 3.9
304 Name.starts_with("store.w.") || // Added in 3.9
305 Name == "store.ss" || // Added in 7.0
306 Name.starts_with("storeu.") || // Added in 3.9
307 Name.starts_with("sub.p") || // Added in 7.0. 128/256 in 4.0
308 Name.starts_with("ucmp.") || // Added in 5.0
309 Name.starts_with("unpckh.") || // Added in 3.9
310 Name.starts_with("unpckl.") || // Added in 3.9
311 Name.starts_with("valign.") || // Added in 4.0
312 Name == "vcvtph2ps.128" || // Added in 11.0
313 Name == "vcvtph2ps.256" || // Added in 11.0
314 Name.starts_with("vextract") || // Added in 4.0
315 Name.starts_with("vfmadd.") || // Added in 7.0
316 Name.starts_with("vfmaddsub.") || // Added in 7.0
317 Name.starts_with("vfnmadd.") || // Added in 7.0
318 Name.starts_with("vfnmsub.") || // Added in 7.0
319 Name.starts_with("vpdpbusd.") || // Added in 7.0
320 Name.starts_with("vpdpbusds.") || // Added in 7.0
321 Name.starts_with("vpdpwssd.") || // Added in 7.0
322 Name.starts_with("vpdpwssds.") || // Added in 7.0
323 Name.starts_with("vpermi2var.") || // Added in 7.0
324 Name.starts_with("vpermil.p") || // Added in 3.9
325 Name.starts_with("vpermilvar.") || // Added in 4.0
326 Name.starts_with("vpermt2var.") || // Added in 7.0
327 Name.starts_with("vpmadd52") || // Added in 7.0
328 Name.starts_with("vpshld.") || // Added in 7.0
329 Name.starts_with("vpshldv.") || // Added in 8.0
330 Name.starts_with("vpshrd.") || // Added in 7.0
331 Name.starts_with("vpshrdv.") || // Added in 8.0
332 Name.starts_with("vpshufbitqmb.") || // Added in 8.0
333 Name.starts_with("xor.")); // Added in 3.9
334
335 if (Name.consume_front("mask3."))
336 // 'avx512.mask3.*'
337 return (Name.starts_with("vfmadd.") || // Added in 7.0
338 Name.starts_with("vfmaddsub.") || // Added in 7.0
339 Name.starts_with("vfmsub.") || // Added in 7.0
340 Name.starts_with("vfmsubadd.") || // Added in 7.0
341 Name.starts_with("vfnmsub.")); // Added in 7.0
342
343 if (Name.consume_front("maskz."))
344 // 'avx512.maskz.*'
345 return (Name.starts_with("pternlog.") || // Added in 7.0
346 Name.starts_with("vfmadd.") || // Added in 7.0
347 Name.starts_with("vfmaddsub.") || // Added in 7.0
348 Name.starts_with("vpdpbusd.") || // Added in 7.0
349 Name.starts_with("vpdpbusds.") || // Added in 7.0
350 Name.starts_with("vpdpwssd.") || // Added in 7.0
351 Name.starts_with("vpdpwssds.") || // Added in 7.0
352 Name.starts_with("vpermt2var.") || // Added in 7.0
353 Name.starts_with("vpmadd52") || // Added in 7.0
354 Name.starts_with("vpshldv.") || // Added in 8.0
355 Name.starts_with("vpshrdv.")); // Added in 8.0
356
357 // 'avx512.*'
358 return (Name == "movntdqa" || // Added in 5.0
359 Name == "pmul.dq.512" || // Added in 7.0
360 Name == "pmulu.dq.512" || // Added in 7.0
361 Name.starts_with("broadcastm") || // Added in 6.0
362 Name.starts_with("cmp.p") || // Added in 12.0
363 Name.starts_with("cvtb2mask.") || // Added in 7.0
364 Name.starts_with("cvtd2mask.") || // Added in 7.0
365 Name.starts_with("cvtmask2") || // Added in 5.0
366 Name.starts_with("cvtq2mask.") || // Added in 7.0
367 Name == "cvtusi2sd" || // Added in 7.0
368 Name.starts_with("cvtw2mask.") || // Added in 7.0
369 Name == "kand.w" || // Added in 7.0
370 Name == "kandn.w" || // Added in 7.0
371 Name == "knot.w" || // Added in 7.0
372 Name == "kor.w" || // Added in 7.0
373 Name == "kortestc.w" || // Added in 7.0
374 Name == "kortestz.w" || // Added in 7.0
375 Name.starts_with("kunpck") || // added in 6.0
376 Name == "kxnor.w" || // Added in 7.0
377 Name == "kxor.w" || // Added in 7.0
378 Name.starts_with("padds.") || // Added in 8.0
379 Name.starts_with("pbroadcast") || // Added in 3.9
380 Name.starts_with("prol") || // Added in 8.0
381 Name.starts_with("pror") || // Added in 8.0
382 Name.starts_with("psll.dq") || // Added in 3.9
383 Name.starts_with("psrl.dq") || // Added in 3.9
384 Name.starts_with("psubs.") || // Added in 8.0
385 Name.starts_with("ptestm") || // Added in 6.0
386 Name.starts_with("ptestnm") || // Added in 6.0
387 Name.starts_with("storent.") || // Added in 3.9
388 Name.starts_with("vbroadcast.s") || // Added in 7.0
389 Name.starts_with("vpshld.") || // Added in 8.0
390 Name.starts_with("vpshrd.")); // Added in 8.0
391 }
392
393 if (Name.consume_front("fma."))
394 return (Name.starts_with("vfmadd.") || // Added in 7.0
395 Name.starts_with("vfmsub.") || // Added in 7.0
396 Name.starts_with("vfmsubadd.") || // Added in 7.0
397 Name.starts_with("vfnmadd.") || // Added in 7.0
398 Name.starts_with("vfnmsub.")); // Added in 7.0
399
400 if (Name.consume_front("fma4."))
401 return Name.starts_with("vfmadd.s"); // Added in 7.0
402
403 if (Name.consume_front("sse."))
404 return (Name == "add.ss" || // Added in 4.0
405 Name == "cvtsi2ss" || // Added in 7.0
406 Name == "cvtsi642ss" || // Added in 7.0
407 Name == "div.ss" || // Added in 4.0
408 Name == "mul.ss" || // Added in 4.0
409 Name.starts_with("sqrt.p") || // Added in 7.0
410 Name == "sqrt.ss" || // Added in 7.0
411 Name.starts_with("storeu.") || // Added in 3.9
412 Name == "sub.ss"); // Added in 4.0
413
414 if (Name.consume_front("sse2."))
415 return (Name == "add.sd" || // Added in 4.0
416 Name == "cvtdq2pd" || // Added in 3.9
417 Name == "cvtdq2ps" || // Added in 7.0
418 Name == "cvtps2pd" || // Added in 3.9
419 Name == "cvtsi2sd" || // Added in 7.0
420 Name == "cvtsi642sd" || // Added in 7.0
421 Name == "cvtss2sd" || // Added in 7.0
422 Name == "div.sd" || // Added in 4.0
423 Name == "mul.sd" || // Added in 4.0
424 Name.starts_with("padds.") || // Added in 8.0
425 Name.starts_with("paddus.") || // Added in 8.0
426 Name.starts_with("pcmpeq.") || // Added in 3.1
427 Name.starts_with("pcmpgt.") || // Added in 3.1
428 Name == "pmaxs.w" || // Added in 3.9
429 Name == "pmaxu.b" || // Added in 3.9
430 Name == "pmins.w" || // Added in 3.9
431 Name == "pminu.b" || // Added in 3.9
432 Name == "pmulu.dq" || // Added in 7.0
433 Name.starts_with("pshuf") || // Added in 3.9
434 Name.starts_with("psll.dq") || // Added in 3.7
435 Name.starts_with("psrl.dq") || // Added in 3.7
436 Name.starts_with("psubs.") || // Added in 8.0
437 Name.starts_with("psubus.") || // Added in 8.0
438 Name.starts_with("sqrt.p") || // Added in 7.0
439 Name == "sqrt.sd" || // Added in 7.0
440 Name == "storel.dq" || // Added in 3.9
441 Name.starts_with("storeu.") || // Added in 3.9
442 Name == "sub.sd"); // Added in 4.0
443
444 if (Name.consume_front("sse41."))
445 return (Name.starts_with("blendp") || // Added in 3.7
446 Name == "movntdqa" || // Added in 5.0
447 Name == "pblendw" || // Added in 3.7
448 Name == "pmaxsb" || // Added in 3.9
449 Name == "pmaxsd" || // Added in 3.9
450 Name == "pmaxud" || // Added in 3.9
451 Name == "pmaxuw" || // Added in 3.9
452 Name == "pminsb" || // Added in 3.9
453 Name == "pminsd" || // Added in 3.9
454 Name == "pminud" || // Added in 3.9
455 Name == "pminuw" || // Added in 3.9
456 Name.starts_with("pmovsx") || // Added in 3.8
457 Name.starts_with("pmovzx") || // Added in 3.9
458 Name == "pmuldq"); // Added in 7.0
459
460 if (Name.consume_front("sse42."))
461 return Name == "crc32.64.8"; // Added in 3.4
462
463 if (Name.consume_front("sse4a."))
464 return Name.starts_with("movnt."); // Added in 3.9
465
466 if (Name.consume_front("ssse3."))
467 return (Name == "pabs.b.128" || // Added in 6.0
468 Name == "pabs.d.128" || // Added in 6.0
469 Name == "pabs.w.128"); // Added in 6.0
470
471 if (Name.consume_front("xop."))
472 return (Name == "vpcmov" || // Added in 3.8
473 Name == "vpcmov.256" || // Added in 5.0
474 Name.starts_with("vpcom") || // Added in 3.2, Updated in 9.0
475 Name.starts_with("vprot")); // Added in 8.0
476
477 return (Name == "addcarry.u32" || // Added in 8.0
478 Name == "addcarry.u64" || // Added in 8.0
479 Name == "addcarryx.u32" || // Added in 8.0
480 Name == "addcarryx.u64" || // Added in 8.0
481 Name == "subborrow.u32" || // Added in 8.0
482 Name == "subborrow.u64" || // Added in 8.0
483 Name.starts_with("vcvtph2ps.")); // Added in 11.0
484}
485
486static bool upgradeX86IntrinsicFunction(Function *F, StringRef Name,
487 Function *&NewFn) {
488 // Only handle intrinsics that start with "x86.".
489 if (!Name.consume_front("x86."))
490 return false;
491
493 NewFn = nullptr;
494 return true;
495 }
496
497 if (Name == "rdtscp") { // Added in 8.0
498 // If this intrinsic has 0 operands, it's the new version.
499 if (F->getFunctionType()->getNumParams() == 0)
500 return false;
501
502 rename(F);
503 NewFn = Intrinsic::getDeclaration(F->getParent(),
504 Intrinsic::x86_rdtscp);
505 return true;
506 }
507
508 Intrinsic::ID ID;
509
510 // SSE4.1 ptest functions may have an old signature.
511 if (Name.consume_front("sse41.ptest")) { // Added in 3.2
512 ID = StringSwitch<Intrinsic::ID>(Name)
513 .Case("c", Intrinsic::x86_sse41_ptestc)
514 .Case("z", Intrinsic::x86_sse41_ptestz)
515 .Case("nzc", Intrinsic::x86_sse41_ptestnzc)
516 .Default(Intrinsic::not_intrinsic);
517 if (ID != Intrinsic::not_intrinsic)
518 return upgradePTESTIntrinsic(F, ID, NewFn);
519
520 return false;
521 }
522
523 // Several blend and other instructions with masks used the wrong number of
524 // bits.
525
526 // Added in 3.6
527 ID = StringSwitch<Intrinsic::ID>(Name)
528 .Case("sse41.insertps", Intrinsic::x86_sse41_insertps)
529 .Case("sse41.dppd", Intrinsic::x86_sse41_dppd)
530 .Case("sse41.dpps", Intrinsic::x86_sse41_dpps)
531 .Case("sse41.mpsadbw", Intrinsic::x86_sse41_mpsadbw)
532 .Case("avx.dp.ps.256", Intrinsic::x86_avx_dp_ps_256)
533 .Case("avx2.mpsadbw", Intrinsic::x86_avx2_mpsadbw)
534 .Default(Intrinsic::not_intrinsic);
535 if (ID != Intrinsic::not_intrinsic)
536 return upgradeX86IntrinsicsWith8BitMask(F, ID, NewFn);
537
538 if (Name.consume_front("avx512.mask.cmp.")) {
539 // Added in 7.0
540 ID = StringSwitch<Intrinsic::ID>(Name)
541 .Case("pd.128", Intrinsic::x86_avx512_mask_cmp_pd_128)
542 .Case("pd.256", Intrinsic::x86_avx512_mask_cmp_pd_256)
543 .Case("pd.512", Intrinsic::x86_avx512_mask_cmp_pd_512)
544 .Case("ps.128", Intrinsic::x86_avx512_mask_cmp_ps_128)
545 .Case("ps.256", Intrinsic::x86_avx512_mask_cmp_ps_256)
546 .Case("ps.512", Intrinsic::x86_avx512_mask_cmp_ps_512)
547 .Default(Intrinsic::not_intrinsic);
548 if (ID != Intrinsic::not_intrinsic)
549 return upgradeX86MaskedFPCompare(F, ID, NewFn);
550 return false; // No other 'x86.avx512.mask.cmp.*'.
551 }
552
553 if (Name.consume_front("avx512bf16.")) {
554 // Added in 9.0
555 ID = StringSwitch<Intrinsic::ID>(Name)
556 .Case("cvtne2ps2bf16.128",
557 Intrinsic::x86_avx512bf16_cvtne2ps2bf16_128)
558 .Case("cvtne2ps2bf16.256",
559 Intrinsic::x86_avx512bf16_cvtne2ps2bf16_256)
560 .Case("cvtne2ps2bf16.512",
561 Intrinsic::x86_avx512bf16_cvtne2ps2bf16_512)
562 .Case("mask.cvtneps2bf16.128",
563 Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128)
564 .Case("cvtneps2bf16.256",
565 Intrinsic::x86_avx512bf16_cvtneps2bf16_256)
566 .Case("cvtneps2bf16.512",
567 Intrinsic::x86_avx512bf16_cvtneps2bf16_512)
568 .Default(Intrinsic::not_intrinsic);
569 if (ID != Intrinsic::not_intrinsic)
570 return upgradeX86BF16Intrinsic(F, ID, NewFn);
571
572 // Added in 9.0
573 ID = StringSwitch<Intrinsic::ID>(Name)
574 .Case("dpbf16ps.128", Intrinsic::x86_avx512bf16_dpbf16ps_128)
575 .Case("dpbf16ps.256", Intrinsic::x86_avx512bf16_dpbf16ps_256)
576 .Case("dpbf16ps.512", Intrinsic::x86_avx512bf16_dpbf16ps_512)
577 .Default(Intrinsic::not_intrinsic);
578 if (ID != Intrinsic::not_intrinsic)
579 return upgradeX86BF16DPIntrinsic(F, ID, NewFn);
580 return false; // No other 'x86.avx512bf16.*'.
581 }
582
583 if (Name.consume_front("xop.")) {
584 Intrinsic::ID ID = Intrinsic::not_intrinsic;
585 if (Name.starts_with("vpermil2")) { // Added in 3.9
586 // Upgrade any XOP PERMIL2 index operand still using a float/double
587 // vector.
588 auto Idx = F->getFunctionType()->getParamType(2);
589 if (Idx->isFPOrFPVectorTy()) {
590 unsigned IdxSize = Idx->getPrimitiveSizeInBits();
591 unsigned EltSize = Idx->getScalarSizeInBits();
592 if (EltSize == 64 && IdxSize == 128)
593 ID = Intrinsic::x86_xop_vpermil2pd;
594 else if (EltSize == 32 && IdxSize == 128)
595 ID = Intrinsic::x86_xop_vpermil2ps;
596 else if (EltSize == 64 && IdxSize == 256)
597 ID = Intrinsic::x86_xop_vpermil2pd_256;
598 else
599 ID = Intrinsic::x86_xop_vpermil2ps_256;
600 }
601 } else if (F->arg_size() == 2)
602 // frcz.ss/sd may need to have an argument dropped. Added in 3.2
603 ID = StringSwitch<Intrinsic::ID>(Name)
604 .Case("vfrcz.ss", Intrinsic::x86_xop_vfrcz_ss)
605 .Case("vfrcz.sd", Intrinsic::x86_xop_vfrcz_sd)
606 .Default(Intrinsic::not_intrinsic);
607
608 if (ID != Intrinsic::not_intrinsic) {
609 rename(F);
610 NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
611 return true;
612 }
613 return false; // No other 'x86.xop.*'
614 }
615
616 if (Name == "seh.recoverfp") {
617 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
618 return true;
619 }
620
621 return false;
622}
623
624// Upgrade ARM (IsArm) or Aarch64 (!IsArm) intrinsic fns. Return true iff so.
625// IsArm: 'arm.*', !IsArm: 'aarch64.*'.
626static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
627 StringRef Name,
628 Function *&NewFn) {
629 if (Name.starts_with("rbit")) {
630 // '(arm|aarch64).rbit'.
631 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
632 F->arg_begin()->getType());
633 return true;
634 }
635
636 if (Name == "thread.pointer") {
637 // '(arm|aarch64).thread.pointer'.
638 NewFn =
639 Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
640 return true;
641 }
642
643 bool Neon = Name.consume_front("neon.");
644 if (Neon) {
645 // '(arm|aarch64).neon.*'.
646 // Changed in 12.0: bfdot accept v4bf16 and v8bf16 instead of v8i8 and
647 // v16i8 respectively.
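// For example (illustrative, AArch64 variant shown), the old
//   declare <4 x float> @llvm.aarch64.neon.bfdot.v4f32.v16i8(<4 x float>, <16 x i8>, <16 x i8>)
// is redirected to the bfloat form
//   declare <4 x float> @llvm.aarch64.neon.bfdot.v4f32.v8bf16(<4 x float>, <8 x bfloat>, <8 x bfloat>)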
648 if (Name.consume_front("bfdot.")) {
649 // (arm|aarch64).neon.bfdot.*'.
650 Intrinsic::ID ID =
651 StringSwitch<Intrinsic::ID>(Name)
652 .Cases("v2f32.v8i8", "v4f32.v16i8",
653 IsArm ? (Intrinsic::ID)Intrinsic::arm_neon_bfdot
654 : (Intrinsic::ID)Intrinsic::aarch64_neon_bfdot)
655 .Default(Intrinsic::not_intrinsic);
656 if (ID != Intrinsic::not_intrinsic) {
657 size_t OperandWidth = F->getReturnType()->getPrimitiveSizeInBits();
658 assert((OperandWidth == 64 || OperandWidth == 128) &&
659 "Unexpected operand width");
660 LLVMContext &Ctx = F->getParent()->getContext();
661 std::array<Type *, 2> Tys{
662 {F->getReturnType(),
663 FixedVectorType::get(Type::getBFloatTy(Ctx), OperandWidth / 16)}};
664 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
665 return true;
666 }
667 return false; // No other '(arm|aarch64).neon.bfdot.*'.
668 }
669
670 // Changed in 12.0: bfmmla, bfmlalb and bfmlalt are not polymorphic
671 // anymore and accept v8bf16 instead of v16i8.
672 if (Name.consume_front("bfm")) {
673 // (arm|aarch64).neon.bfm*'.
674 if (Name.consume_back(".v4f32.v16i8")) {
675 // (arm|aarch64).neon.bfm*.v4f32.v16i8'.
676 Intrinsic::ID ID =
677 StringSwitch<Intrinsic::ID>(Name)
678 .Case("mla",
679 IsArm ? (Intrinsic::ID)Intrinsic::arm_neon_bfmmla
680 : (Intrinsic::ID)Intrinsic::aarch64_neon_bfmmla)
681 .Case("lalb",
682 IsArm ? (Intrinsic::ID)Intrinsic::arm_neon_bfmlalb
683 : (Intrinsic::ID)Intrinsic::aarch64_neon_bfmlalb)
684 .Case("lalt",
685 IsArm ? (Intrinsic::ID)Intrinsic::arm_neon_bfmlalt
686 : (Intrinsic::ID)Intrinsic::aarch64_neon_bfmlalt)
687 .Default(Intrinsic::not_intrinsic);
688 if (ID != Intrinsic::not_intrinsic) {
689 NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
690 return true;
691 }
692 return false; // No other '(arm|aarch64).neon.bfm*.v16i8'.
693 }
694 return false; // No other '(arm|aarch64).neon.bfm*.
695 }
696 // Continue on to Aarch64 Neon or Arm Neon.
697 }
698 // Continue on to Arm or Aarch64.
699
700 if (IsArm) {
701 // 'arm.*'.
702 if (Neon) {
703 // 'arm.neon.*'.
704 Intrinsic::ID ID = StringSwitch<Intrinsic::ID>(Name)
705 .StartsWith("vclz.", Intrinsic::ctlz)
706 .StartsWith("vcnt.", Intrinsic::ctpop)
707 .StartsWith("vqadds.", Intrinsic::sadd_sat)
708 .StartsWith("vqaddu.", Intrinsic::uadd_sat)
709 .StartsWith("vqsubs.", Intrinsic::ssub_sat)
710 .StartsWith("vqsubu.", Intrinsic::usub_sat)
711 .Default(Intrinsic::not_intrinsic);
712 if (ID != Intrinsic::not_intrinsic) {
713 NewFn = Intrinsic::getDeclaration(F->getParent(), ID,
714 F->arg_begin()->getType());
715 return true;
716 }
717
718 if (Name.consume_front("vst")) {
719 // 'arm.neon.vst*'.
720 static const Regex vstRegex("^([1234]|[234]lane)\\.v[a-z0-9]*$");
721 SmallVector<StringRef, 2> Groups;
722 if (vstRegex.match(Name, &Groups)) {
723 static const Intrinsic::ID StoreInts[] = {
724 Intrinsic::arm_neon_vst1, Intrinsic::arm_neon_vst2,
725 Intrinsic::arm_neon_vst3, Intrinsic::arm_neon_vst4};
726
727 static const Intrinsic::ID StoreLaneInts[] = {
728 Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
729 Intrinsic::arm_neon_vst4lane};
730
731 auto fArgs = F->getFunctionType()->params();
732 Type *Tys[] = {fArgs[0], fArgs[1]};
733 if (Groups[1].size() == 1)
734 NewFn = Intrinsic::getDeclaration(F->getParent(),
735 StoreInts[fArgs.size() - 3], Tys);
736 else
737 NewFn = Intrinsic::getDeclaration(
738 F->getParent(), StoreLaneInts[fArgs.size() - 5], Tys);
739 return true;
740 }
741 return false; // No other 'arm.neon.vst*'.
742 }
743
744 return false; // No other 'arm.neon.*'.
745 }
746
747 if (Name.consume_front("mve.")) {
748 // 'arm.mve.*'.
749 if (Name == "vctp64") {
750 if (cast<FixedVectorType>(F->getReturnType())->getNumElements() == 4) {
751 // A vctp64 returning a v4i1 is converted to return a v2i1. Rename
752 // the function and deal with it below in UpgradeIntrinsicCall.
753 rename(F);
754 return true;
755 }
756 return false; // Not 'arm.mve.vctp64'.
757 }
758
759 // These too are changed to accept a v2i1 instead of the old v4i1.
760 if (Name.consume_back(".v4i1")) {
761 // 'arm.mve.*.v4i1'.
762 if (Name.consume_back(".predicated.v2i64.v4i32"))
763 // 'arm.mve.*.predicated.v2i64.v4i32.v4i1'
764 return Name == "mull.int" || Name == "vqdmull";
765
766 if (Name.consume_back(".v2i64")) {
767 // 'arm.mve.*.v2i64.v4i1'
768 bool IsGather = Name.consume_front("vldr.gather.");
769 if (IsGather || Name.consume_front("vstr.scatter.")) {
770 if (Name.consume_front("base.")) {
771 // Optional 'wb.' prefix.
772 Name.consume_front("wb.");
773 // 'arm.mve.(vldr.gather|vstr.scatter).base.(wb.)?
774 // predicated.v2i64.v2i64.v4i1'.
775 return Name == "predicated.v2i64";
776 }
777
778 if (Name.consume_front("offset.predicated."))
779 return Name == (IsGather ? "v2i64.p0i64" : "p0i64.v2i64") ||
780 Name == (IsGather ? "v2i64.p0" : "p0.v2i64");
781
782 // No other 'arm.mve.(vldr.gather|vstr.scatter).*.v2i64.v4i1'.
783 return false;
784 }
785
786 return false; // No other 'arm.mve.*.v2i64.v4i1'.
787 }
788 return false; // No other 'arm.mve.*.v4i1'.
789 }
790 return false; // No other 'arm.mve.*'.
791 }
792
793 if (Name.consume_front("cde.vcx")) {
794 // 'arm.cde.vcx*'.
795 if (Name.consume_back(".predicated.v2i64.v4i1"))
796 // 'arm.cde.vcx*.predicated.v2i64.v4i1'.
797 return Name == "1q" || Name == "1qa" || Name == "2q" || Name == "2qa" ||
798 Name == "3q" || Name == "3qa";
799
800 return false; // No other 'arm.cde.vcx*'.
801 }
802 } else {
803 // 'aarch64.*'.
804 if (Neon) {
805 // 'aarch64.neon.*'.
806 Intrinsic::ID ID = StringSwitch<Intrinsic::ID>(Name)
807 .StartsWith("frintn", Intrinsic::roundeven)
808 .StartsWith("rbit", Intrinsic::bitreverse)
809 .Default(Intrinsic::not_intrinsic);
810 if (ID != Intrinsic::not_intrinsic) {
811 NewFn = Intrinsic::getDeclaration(F->getParent(), ID,
812 F->arg_begin()->getType());
813 return true;
814 }
815
816 if (Name.starts_with("addp")) {
817 // 'aarch64.neon.addp*'.
818 if (F->arg_size() != 2)
819 return false; // Invalid IR.
820 VectorType *Ty = dyn_cast<VectorType>(F->getReturnType());
821 if (Ty && Ty->getElementType()->isFloatingPointTy()) {
822 NewFn = Intrinsic::getDeclaration(F->getParent(),
823 Intrinsic::aarch64_neon_faddp, Ty);
824 return true;
825 }
826 }
827 return false; // No other 'aarch64.neon.*'.
828 }
829 if (Name.consume_front("sve.")) {
830 // 'aarch64.sve.*'.
831 if (Name.consume_front("bf")) {
832 if (Name.consume_back(".lane")) {
833 // 'aarch64.sve.bf*.lane'.
834 Intrinsic::ID ID =
835 StringSwitch<Intrinsic::ID>(Name)
836 .Case("dot", Intrinsic::aarch64_sve_bfdot_lane_v2)
837 .Case("mlalb", Intrinsic::aarch64_sve_bfmlalb_lane_v2)
838 .Case("mlalt", Intrinsic::aarch64_sve_bfmlalt_lane_v2)
839 .Default(Intrinsic::not_intrinsic);
840 if (ID != Intrinsic::not_intrinsic) {
841 NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
842 return true;
843 }
844 return false; // No other 'aarch64.sve.bf*.lane'.
845 }
846 return false; // No other 'aarch64.sve.bf*'.
847 }
848
849 if (Name.consume_front("addqv")) {
850 // 'aarch64.sve.addqv'.
851 if (!F->getReturnType()->isFPOrFPVectorTy())
852 return false;
853
854 auto Args = F->getFunctionType()->params();
855 Type *Tys[] = {F->getReturnType(), Args[1]};
856 NewFn = Intrinsic::getDeclaration(F->getParent(),
857 Intrinsic::aarch64_sve_faddqv, Tys);
858 return true;
859 }
860
861 if (Name.consume_front("ld")) {
862 // 'aarch64.sve.ld*'.
863 static const Regex LdRegex("^[234](.nxv[a-z0-9]+|$)");
864 if (LdRegex.match(Name)) {
865 Type *ScalarTy =
866 dyn_cast<VectorType>(F->getReturnType())->getElementType();
867 ElementCount EC = dyn_cast<VectorType>(F->arg_begin()->getType())
868 ->getElementCount();
869 Type *Ty = VectorType::get(ScalarTy, EC);
870 static const Intrinsic::ID LoadIDs[] = {
871 Intrinsic::aarch64_sve_ld2_sret,
872 Intrinsic::aarch64_sve_ld3_sret,
873 Intrinsic::aarch64_sve_ld4_sret,
874 };
875 NewFn = Intrinsic::getDeclaration(F->getParent(),
876 LoadIDs[Name[0] - '2'], Ty);
877 return true;
878 }
879 return false; // No other 'aarch64.sve.ld*'.
880 }
881
882 if (Name.consume_front("tuple.")) {
883 // 'aarch64.sve.tuple.*'.
884 if (Name.starts_with("get")) {
885 // 'aarch64.sve.tuple.get*'.
886 Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
887 NewFn = Intrinsic::getDeclaration(F->getParent(),
888 Intrinsic::vector_extract, Tys);
889 return true;
890 }
891
892 if (Name.starts_with("set")) {
893 // 'aarch64.sve.tuple.set*'.
894 auto Args = F->getFunctionType()->params();
895 Type *Tys[] = {Args[0], Args[2], Args[1]};
896 NewFn = Intrinsic::getDeclaration(F->getParent(),
897 Intrinsic::vector_insert, Tys);
898 return true;
899 }
900
901 static const Regex CreateTupleRegex("^create[234](.nxv[a-z0-9]+|$)");
902 if (CreateTupleRegex.match(Name)) {
903 // 'aarch64.sve.tuple.create*'.
904 auto Args = F->getFunctionType()->params();
905 Type *Tys[] = {F->getReturnType(), Args[1]};
906 NewFn = Intrinsic::getDeclaration(F->getParent(),
907 Intrinsic::vector_insert, Tys);
908 return true;
909 }
910 return false; // No other 'aarch64.sve.tuple.*'.
911 }
912 return false; // No other 'aarch64.sve.*'.
913 }
914 }
915 return false; // No other 'arm.*', 'aarch64.*'.
916}
917
918static Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
919 if (Name.consume_front("abs."))
920 return StringSwitch<Intrinsic::ID>(Name)
921 .Case("bf16", Intrinsic::nvvm_abs_bf16)
922 .Case("bf16x2", Intrinsic::nvvm_abs_bf16x2)
923 .Default(Intrinsic::not_intrinsic);
924
925 if (Name.consume_front("fma.rn."))
926 return StringSwitch<Intrinsic::ID>(Name)
927 .Case("bf16", Intrinsic::nvvm_fma_rn_bf16)
928 .Case("bf16x2", Intrinsic::nvvm_fma_rn_bf16x2)
929 .Case("ftz.bf16", Intrinsic::nvvm_fma_rn_ftz_bf16)
930 .Case("ftz.bf16x2", Intrinsic::nvvm_fma_rn_ftz_bf16x2)
931 .Case("ftz.relu.bf16", Intrinsic::nvvm_fma_rn_ftz_relu_bf16)
932 .Case("ftz.relu.bf16x2", Intrinsic::nvvm_fma_rn_ftz_relu_bf16x2)
933 .Case("ftz.sat.bf16", Intrinsic::nvvm_fma_rn_ftz_sat_bf16)
934 .Case("ftz.sat.bf16x2", Intrinsic::nvvm_fma_rn_ftz_sat_bf16x2)
935 .Case("relu.bf16", Intrinsic::nvvm_fma_rn_relu_bf16)
936 .Case("relu.bf16x2", Intrinsic::nvvm_fma_rn_relu_bf16x2)
937 .Case("sat.bf16", Intrinsic::nvvm_fma_rn_sat_bf16)
938 .Case("sat.bf16x2", Intrinsic::nvvm_fma_rn_sat_bf16x2)
939 .Default(Intrinsic::not_intrinsic);
940
941 if (Name.consume_front("fmax."))
942 return StringSwitch<Intrinsic::ID>(Name)
943 .Case("bf16", Intrinsic::nvvm_fmax_bf16)
944 .Case("bf16x2", Intrinsic::nvvm_fmax_bf16x2)
945 .Case("ftz.bf16", Intrinsic::nvvm_fmax_ftz_bf16)
946 .Case("ftz.bf16x2", Intrinsic::nvvm_fmax_ftz_bf16x2)
947 .Case("ftz.nan.bf16", Intrinsic::nvvm_fmax_ftz_nan_bf16)
948 .Case("ftz.nan.bf16x2", Intrinsic::nvvm_fmax_ftz_nan_bf16x2)
949 .Case("ftz.nan.xorsign.abs.bf16",
950 Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_bf16)
951 .Case("ftz.nan.xorsign.abs.bf16x2",
952 Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_bf16x2)
953 .Case("ftz.xorsign.abs.bf16", Intrinsic::nvvm_fmax_ftz_xorsign_abs_bf16)
954 .Case("ftz.xorsign.abs.bf16x2",
955 Intrinsic::nvvm_fmax_ftz_xorsign_abs_bf16x2)
956 .Case("nan.bf16", Intrinsic::nvvm_fmax_nan_bf16)
957 .Case("nan.bf16x2", Intrinsic::nvvm_fmax_nan_bf16x2)
958 .Case("nan.xorsign.abs.bf16", Intrinsic::nvvm_fmax_nan_xorsign_abs_bf16)
959 .Case("nan.xorsign.abs.bf16x2",
960 Intrinsic::nvvm_fmax_nan_xorsign_abs_bf16x2)
961 .Case("xorsign.abs.bf16", Intrinsic::nvvm_fmax_xorsign_abs_bf16)
962 .Case("xorsign.abs.bf16x2", Intrinsic::nvvm_fmax_xorsign_abs_bf16x2)
963 .Default(Intrinsic::not_intrinsic);
964
965 if (Name.consume_front("fmin."))
966 return StringSwitch<Intrinsic::ID>(Name)
967 .Case("bf16", Intrinsic::nvvm_fmin_bf16)
968 .Case("bf16x2", Intrinsic::nvvm_fmin_bf16x2)
969 .Case("ftz.bf16", Intrinsic::nvvm_fmin_ftz_bf16)
970 .Case("ftz.bf16x2", Intrinsic::nvvm_fmin_ftz_bf16x2)
971 .Case("ftz.nan.bf16", Intrinsic::nvvm_fmin_ftz_nan_bf16)
972 .Case("ftz.nan.bf16x2", Intrinsic::nvvm_fmin_ftz_nan_bf16x2)
973 .Case("ftz.nan.xorsign.abs.bf16",
974 Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_bf16)
975 .Case("ftz.nan.xorsign.abs.bf16x2",
976 Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_bf16x2)
977 .Case("ftz.xorsign.abs.bf16", Intrinsic::nvvm_fmin_ftz_xorsign_abs_bf16)
978 .Case("ftz.xorsign.abs.bf16x2",
979 Intrinsic::nvvm_fmin_ftz_xorsign_abs_bf16x2)
980 .Case("nan.bf16", Intrinsic::nvvm_fmin_nan_bf16)
981 .Case("nan.bf16x2", Intrinsic::nvvm_fmin_nan_bf16x2)
982 .Case("nan.xorsign.abs.bf16", Intrinsic::nvvm_fmin_nan_xorsign_abs_bf16)
983 .Case("nan.xorsign.abs.bf16x2",
984 Intrinsic::nvvm_fmin_nan_xorsign_abs_bf16x2)
985 .Case("xorsign.abs.bf16", Intrinsic::nvvm_fmin_xorsign_abs_bf16)
986 .Case("xorsign.abs.bf16x2", Intrinsic::nvvm_fmin_xorsign_abs_bf16x2)
987 .Default(Intrinsic::not_intrinsic);
988
989 if (Name.consume_front("neg."))
990 return StringSwitch<Intrinsic::ID>(Name)
991 .Case("bf16", Intrinsic::nvvm_neg_bf16)
992 .Case("bf16x2", Intrinsic::nvvm_neg_bf16x2)
993 .Default(Intrinsic::not_intrinsic);
994
995 return Intrinsic::not_intrinsic;
996}
997
998static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
999 bool CanUpgradeDebugIntrinsicsToRecords) {
1000 assert(F && "Illegal to upgrade a non-existent Function.");
1001
1002 StringRef Name = F->getName();
1003
1004 // Quickly eliminate it, if it's not a candidate.
1005 if (!Name.consume_front("llvm.") || Name.empty())
1006 return false;
1007
1008 switch (Name[0]) {
1009 default: break;
1010 case 'a': {
1011 bool IsArm = Name.consume_front("arm.");
1012 if (IsArm || Name.consume_front("aarch64.")) {
1013 if (upgradeArmOrAarch64IntrinsicFunction(IsArm, F, Name, NewFn))
1014 return true;
1015 break;
1016 }
1017
1018 if (Name.consume_front("amdgcn.")) {
1019 if (Name == "alignbit") {
1020 // Target specific intrinsic became redundant
1021 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::fshr,
1022 {F->getReturnType()});
1023 return true;
1024 }
1025
1026 if (Name.consume_front("atomic.")) {
1027 if (Name.starts_with("inc") || Name.starts_with("dec")) {
1028 // These were replaced with atomicrmw uinc_wrap and udec_wrap, so
1029 // there's no new declaration.
1030 NewFn = nullptr;
1031 return true;
1032 }
1033 break; // No other 'amdgcn.atomic.*'
1034 }
1035
1036 if (Name.starts_with("ldexp.")) {
1037 // Target specific intrinsic became redundant
1038 NewFn = Intrinsic::getDeclaration(
1039 F->getParent(), Intrinsic::ldexp,
1040 {F->getReturnType(), F->getArg(1)->getType()});
1041 return true;
1042 }
1043 break; // No other 'amdgcn.*'
1044 }
1045
1046 break;
1047 }
1048 case 'c': {
1049 if (F->arg_size() == 1) {
1050 Intrinsic::ID ID = StringSwitch<Intrinsic::ID>(Name)
1051 .StartsWith("ctlz.", Intrinsic::ctlz)
1052 .StartsWith("cttz.", Intrinsic::cttz)
1053 .Default(Intrinsic::not_intrinsic);
1054 if (ID != Intrinsic::not_intrinsic) {
1055 rename(F);
1056 NewFn = Intrinsic::getDeclaration(F->getParent(), ID,
1057 F->arg_begin()->getType());
1058 return true;
1059 }
1060 }
1061
1062 if (F->arg_size() == 2 && Name == "coro.end") {
1063 rename(F);
1064 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::coro_end);
1065 return true;
1066 }
1067
1068 break;
1069 }
1070 case 'd':
1071 if (Name.consume_front("dbg.")) {
1072 // Mark debug intrinsics for upgrade to new debug format.
1073 if (CanUpgradeDebugIntrinsicsToRecords &&
1074 F->getParent()->IsNewDbgInfoFormat) {
1075 if (Name == "addr" || Name == "value" || Name == "assign" ||
1076 Name == "declare" || Name == "label") {
1077 // There's no function to replace these with.
1078 NewFn = nullptr;
1079 // But we do want these to get upgraded.
1080 return true;
1081 }
1082 }
1083 // Update llvm.dbg.addr intrinsics even in "new debug mode"; they'll get
1084 // converted to DbgVariableRecords later.
1085 if (Name == "addr" || (Name == "value" && F->arg_size() == 4)) {
1086 rename(F);
1087 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
1088 return true;
1089 }
1090 break; // No other 'dbg.*'.
1091 }
1092 break;
1093 case 'e':
1094 if (Name.consume_front("experimental.vector.")) {
1095 Intrinsic::ID ID =
1096 StringSwitch<Intrinsic::ID>(Name)
1097 .StartsWith("extract.", Intrinsic::vector_extract)
1098 .StartsWith("insert.", Intrinsic::vector_insert)
1099 .StartsWith("splice.", Intrinsic::vector_splice)
1100 .StartsWith("reverse.", Intrinsic::vector_reverse)
1101 .StartsWith("interleave2.", Intrinsic::vector_interleave2)
1102 .StartsWith("deinterleave2.", Intrinsic::vector_deinterleave2)
1103 .Default(Intrinsic::not_intrinsic);
1104 if (ID != Intrinsic::not_intrinsic) {
1105 const auto *FT = F->getFunctionType();
1106 SmallVector<Type *, 2> Tys;
1107 if (ID == Intrinsic::vector_extract ||
1108 ID == Intrinsic::vector_interleave2)
1109 // Extracting overloads the return type.
1110 Tys.push_back(FT->getReturnType());
1111 if (ID != Intrinsic::vector_interleave2)
1112 Tys.push_back(FT->getParamType(0));
1113 if (ID == Intrinsic::vector_insert)
1114 // Inserting overloads the inserted type.
1115 Tys.push_back(FT->getParamType(1));
1116 rename(F);
1117 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
1118 return true;
1119 }
1120
1121 if (Name.consume_front("reduce.")) {
1122 SmallVector<StringRef, 2> Groups;
1123 static const Regex R("^([a-z]+)\.[a-z][0-9]+");
1124 if (R.match(Name, &Groups))
1125 ID = StringSwitch<Intrinsic::ID>(Groups[1])
1126 .Case("add", Intrinsic::vector_reduce_add)
1127 .Case("mul", Intrinsic::vector_reduce_mul)
1128 .Case("and", Intrinsic::vector_reduce_and)
1129 .Case("or", Intrinsic::vector_reduce_or)
1130 .Case("xor", Intrinsic::vector_reduce_xor)
1131 .Case("smax", Intrinsic::vector_reduce_smax)
1132 .Case("smin", Intrinsic::vector_reduce_smin)
1133 .Case("umax", Intrinsic::vector_reduce_umax)
1134 .Case("umin", Intrinsic::vector_reduce_umin)
1135 .Case("fmax", Intrinsic::vector_reduce_fmax)
1136 .Case("fmin", Intrinsic::vector_reduce_fmin)
1137 .Default(Intrinsic::not_intrinsic);
1138
1139 bool V2 = false;
1140 if (ID == Intrinsic::not_intrinsic) {
1141 static const Regex R2("^v2\.([a-z]+)\.[fi][0-9]+");
1142 Groups.clear();
1143 V2 = true;
1144 if (R2.match(Name, &Groups))
1145 ID = StringSwitch<Intrinsic::ID>(Groups[1])
1146 .Case("fadd", Intrinsic::vector_reduce_fadd)
1147 .Case("fmul", Intrinsic::vector_reduce_fmul)
1148 .Default(Intrinsic::not_intrinsic);
1149 }
1150 if (ID != Intrinsic::not_intrinsic) {
1151 rename(F);
1152 auto Args = F->getFunctionType()->params();
1153 NewFn =
1154 Intrinsic::getDeclaration(F->getParent(), ID, {Args[V2 ? 1 : 0]});
1155 return true;
1156 }
1157 break; // No other 'experimental.vector.reduce.*'.
1158 }
1159 break; // No other 'experimental.vector.*'.
1160 }
1161 break; // No other 'e*'.
1162 case 'f':
1163 if (Name.starts_with("flt.rounds")) {
1164 rename(F);
1165 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::get_rounding);
1166 return true;
1167 }
1168 break;
1169 case 'i':
1170 if (Name.starts_with("invariant.group.barrier")) {
1171 // Rename invariant.group.barrier to launder.invariant.group
1172 auto Args = F->getFunctionType()->params();
1173 Type* ObjectPtr[1] = {Args[0]};
1174 rename(F);
1175 NewFn = Intrinsic::getDeclaration(F->getParent(),
1176 Intrinsic::launder_invariant_group, ObjectPtr);
1177 return true;
1178 }
1179 break;
1180 case 'm': {
1181 // Updating the memory intrinsics (memcpy/memmove/memset) that have an
1182 // alignment parameter to embedding the alignment as an attribute of
1183 // the pointer args.
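// For example (illustrative IR), an old five-argument call such as
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 4, i1 false)
// becomes the current four-argument form, carrying the alignment as
// parameter attributes:
//   call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %src, i64 %n, i1 false)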
1184 if (unsigned ID = StringSwitch<unsigned>(Name)
1185 .StartsWith("memcpy.", Intrinsic::memcpy)
1186 .StartsWith("memmove.", Intrinsic::memmove)
1187 .Default(0)) {
1188 if (F->arg_size() == 5) {
1189 rename(F);
1190 // Get the types of dest, src, and len
1191 ArrayRef<Type *> ParamTypes =
1192 F->getFunctionType()->params().slice(0, 3);
1193 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ParamTypes);
1194 return true;
1195 }
1196 }
1197 if (Name.starts_with("memset.") && F->arg_size() == 5) {
1198 rename(F);
1199 // Get the types of dest, and len
1200 const auto *FT = F->getFunctionType();
1201 Type *ParamTypes[2] = {
1202 FT->getParamType(0), // Dest
1203 FT->getParamType(2) // len
1204 };
1205 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
1206 ParamTypes);
1207 return true;
1208 }
1209 break;
1210 }
1211 case 'n': {
1212 if (Name.consume_front("nvvm.")) {
1213 // Check for nvvm intrinsics corresponding exactly to an LLVM intrinsic.
1214 if (F->arg_size() == 1) {
1215 Intrinsic::ID IID =
1216 StringSwitch<Intrinsic::ID>(Name)
1217 .Cases("brev32", "brev64", Intrinsic::bitreverse)
1218 .Case("clz.i", Intrinsic::ctlz)
1219 .Case("popc.i", Intrinsic::ctpop)
1220 .Default(Intrinsic::not_intrinsic);
1221 if (IID != Intrinsic::not_intrinsic) {
1222 NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
1223 {F->getReturnType()});
1224 return true;
1225 }
1226 }
1227
1228 // Check for nvvm intrinsics that need a return type adjustment.
1229 if (!F->getReturnType()->getScalarType()->isBFloatTy()) {
1230 Intrinsic::ID IID = shouldUpgradeNVPTXBF16Intrinsic(Name);
1231 if (IID != Intrinsic::not_intrinsic) {
1232 NewFn = nullptr;
1233 return true;
1234 }
1235 }
1236
1237 // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
1238 // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
1239 //
1240 // TODO: We could add lohi.i2d.
1241 bool Expand = false;
1242 if (Name.consume_front("abs."))
1243 // nvvm.abs.{i,ll}
1244 Expand = Name == "i" || Name == "ll";
1245 else if (Name == "clz.ll" || Name == "popc.ll" || Name == "h2f")
1246 Expand = true;
1247 else if (Name.consume_front("max.") || Name.consume_front("min."))
1248 // nvvm.{min,max}.{s,i,ll,us,ui,ull}
1249 Expand = Name == "s" || Name == "i" || Name == "ll" || Name == "us" ||
1250 Name == "ui" || Name == "ull";
1251 else if (Name.consume_front("atomic.load.add."))
1252 // nvvm.atomic.load.add.{f32.p,f64.p}
1253 Expand = Name.starts_with("f32.p") || Name.starts_with("f64.p");
1254 else
1255 Expand = false;
1256
1257 if (Expand) {
1258 NewFn = nullptr;
1259 return true;
1260 }
1261 break; // No other 'nvvm.*'.
1262 }
1263 break;
1264 }
1265 case 'o':
1266 // We only need to change the name to match the mangling including the
1267 // address space.
1268 if (Name.starts_with("objectsize.")) {
1269 Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
1270 if (F->arg_size() == 2 || F->arg_size() == 3 ||
1271 F->getName() !=
1272 Intrinsic::getName(Intrinsic::objectsize, Tys, F->getParent())) {
1273 rename(F);
1274 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
1275 Tys);
1276 return true;
1277 }
1278 }
1279 break;
1280
1281 case 'p':
1282 if (Name.starts_with("ptr.annotation.") && F->arg_size() == 4) {
1283 rename(F);
1284 NewFn = Intrinsic::getDeclaration(
1285 F->getParent(), Intrinsic::ptr_annotation,
1286 {F->arg_begin()->getType(), F->getArg(1)->getType()});
1287 return true;
1288 }
1289 break;
1290
1291 case 'r': {
1292 if (Name.consume_front("riscv.")) {
1293 Intrinsic::ID ID;
1294 ID = StringSwitch<Intrinsic::ID>(Name)
1295 .Case("aes32dsi", Intrinsic::riscv_aes32dsi)
1296 .Case("aes32dsmi", Intrinsic::riscv_aes32dsmi)
1297 .Case("aes32esi", Intrinsic::riscv_aes32esi)
1298 .Case("aes32esmi", Intrinsic::riscv_aes32esmi)
1299 .Default(Intrinsic::not_intrinsic);
1300 if (ID != Intrinsic::not_intrinsic) {
1301 if (!F->getFunctionType()->getParamType(2)->isIntegerTy(32)) {
1302 rename(F);
1303 NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
1304 return true;
1305 }
1306 break; // No other applicable upgrades.
1307 }
1308
1309 ID = StringSwitch<Intrinsic::ID>(Name)
1310 .StartsWith("sm4ks", Intrinsic::riscv_sm4ks)
1311 .StartsWith("sm4ed", Intrinsic::riscv_sm4ed)
1312 .Default(Intrinsic::not_intrinsic);
1313 if (ID != Intrinsic::not_intrinsic) {
1314 if (!F->getFunctionType()->getParamType(2)->isIntegerTy(32) ||
1315 F->getFunctionType()->getReturnType()->isIntegerTy(64)) {
1316 rename(F);
1317 NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
1318 return true;
1319 }
1320 break; // No other applicable upgrades.
1321 }
1322
1323 ID = StringSwitch<Intrinsic::ID>(Name)
1324 .StartsWith("sha256sig0", Intrinsic::riscv_sha256sig0)
1325 .StartsWith("sha256sig1", Intrinsic::riscv_sha256sig1)
1326 .StartsWith("sha256sum0", Intrinsic::riscv_sha256sum0)
1327 .StartsWith("sha256sum1", Intrinsic::riscv_sha256sum1)
1328 .StartsWith("sm3p0", Intrinsic::riscv_sm3p0)
1329 .StartsWith("sm3p1", Intrinsic::riscv_sm3p1)
1330 .Default(Intrinsic::not_intrinsic);
1331 if (ID != Intrinsic::not_intrinsic) {
1332 if (F->getFunctionType()->getReturnType()->isIntegerTy(64)) {
1333 rename(F);
1334 NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
1335 return true;
1336 }
1337 break; // No other applicable upgrades.
1338 }
1339 break; // No other 'riscv.*' intrinsics
1340 }
1341 } break;
1342
1343 case 's':
1344 if (Name == "stackprotectorcheck") {
1345 NewFn = nullptr;
1346 return true;
1347 }
1348 break;
1349
1350 case 'v': {
1351 if (Name == "var.annotation" && F->arg_size() == 4) {
1352 rename(F);
1353 NewFn = Intrinsic::getDeclaration(
1354 F->getParent(), Intrinsic::var_annotation,
1355 {{F->arg_begin()->getType(), F->getArg(1)->getType()}});
1356 return true;
1357 }
1358 break;
1359 }
1360
1361 case 'w':
1362 if (Name.consume_front("wasm.")) {
1363 Intrinsic::ID ID =
1364 StringSwitch<Intrinsic::ID>(Name)
1365 .StartsWith("fma.", Intrinsic::wasm_relaxed_madd)
1366 .StartsWith("fms.", Intrinsic::wasm_relaxed_nmadd)
1367 .StartsWith("laneselect.", Intrinsic::wasm_relaxed_laneselect)
1368 .Default(Intrinsic::not_intrinsic);
1369 if (ID != Intrinsic::not_intrinsic) {
1370 rename(F);
1371 NewFn =
1372 Intrinsic::getDeclaration(F->getParent(), ID, F->getReturnType());
1373 return true;
1374 }
1375
1376 if (Name.consume_front("dot.i8x16.i7x16.")) {
1377 Intrinsic::ID ID = StringSwitch<Intrinsic::ID>(Name)
1378 .Case("signed", Intrinsic::wasm_relaxed_dot_i8x16_i7x16_signed)
1379 .Case("add.signed",
1380 Intrinsic::wasm_relaxed_dot_i8x16_i7x16_add_signed)
1381 .Default(Intrinsic::not_intrinsic);
1382 if (ID != Intrinsic::not_intrinsic) {
1383 rename(F);
1384 NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
1385 return true;
1386 }
1387 break; // No other 'wasm.dot.i8x16.i7x16.*'.
1388 }
1389 break; // No other 'wasm.*'.
1390 }
1391 break;
1392
1393 case 'x':
1394 if (upgradeX86IntrinsicFunction(F, Name, NewFn))
1395 return true;
1396 }
1397
1398 auto *ST = dyn_cast<StructType>(F->getReturnType());
1399 if (ST && (!ST->isLiteral() || ST->isPacked()) &&
1400 F->getIntrinsicID() != Intrinsic::not_intrinsic) {
1401 // Replace return type with literal non-packed struct. Only do this for
1402 // intrinsics declared to return a struct, not for intrinsics with
1403 // overloaded return type, in which case the exact struct type will be
1404 // mangled into the name.
1405 SmallVector<Intrinsic::IITDescriptor> Desc;
1406 Intrinsic::getIntrinsicInfoTableEntries(F->getIntrinsicID(), Desc);
1407 if (Desc.front().Kind == Intrinsic::IITDescriptor::Struct) {
1408 auto *FT = F->getFunctionType();
1409 auto *NewST = StructType::get(ST->getContext(), ST->elements());
1410 auto *NewFT = FunctionType::get(NewST, FT->params(), FT->isVarArg());
1411 std::string Name = F->getName().str();
1412 rename(F);
1413 NewFn = Function::Create(NewFT, F->getLinkage(), F->getAddressSpace(),
1414 Name, F->getParent());
1415
1416 // The new function may also need remangling.
1417 if (auto Result = llvm::Intrinsic::remangleIntrinsicFunction(NewFn))
1418 NewFn = *Result;
1419 return true;
1420 }
1421 }
1422
1423 // Remangle our intrinsic since we upgrade the mangling
1424 auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
1425 if (Result != std::nullopt) {
1426 NewFn = *Result;
1427 return true;
1428 }
1429
1430 // This may not belong here. This function is effectively being overloaded
1431 // to both detect an intrinsic which needs upgrading, and to provide the
1432 // upgraded form of the intrinsic. We should perhaps have two separate
1433 // functions for this.
1434 return false;
1435}
1436
1437bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn,
1438 bool CanUpgradeDebugIntrinsicsToRecords) {
1439 NewFn = nullptr;
1440 bool Upgraded =
1441 upgradeIntrinsicFunction1(F, NewFn, CanUpgradeDebugIntrinsicsToRecords);
1442 assert(F != NewFn && "Intrinsic function upgraded to the same function");
1443
1444 // Upgrade intrinsic attributes. This does not change the function.
1445 if (NewFn)
1446 F = NewFn;
1447 if (Intrinsic::ID id = F->getIntrinsicID())
1448 F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
1449 return Upgraded;
1450}
1451
1452GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
1453 if (!(GV->hasName() && (GV->getName() == "llvm.global_ctors" ||
1454 GV->getName() == "llvm.global_dtors")) ||
1455 !GV->hasInitializer())
1456 return nullptr;
1457 ArrayType *ATy = dyn_cast<ArrayType>(GV->getValueType());
1458 if (!ATy)
1459 return nullptr;
1460 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
1461 if (!STy || STy->getNumElements() != 2)
1462 return nullptr;
1463
1464 LLVMContext &C = GV->getContext();
1465 IRBuilder<> IRB(C);
1466 auto EltTy = StructType::get(STy->getElementType(0), STy->getElementType(1),
1467 IRB.getPtrTy());
1468 Constant *Init = GV->getInitializer();
1469 unsigned N = Init->getNumOperands();
1470 std::vector<Constant *> NewCtors(N);
1471 for (unsigned i = 0; i != N; ++i) {
1472 auto Ctor = cast<Constant>(Init->getOperand(i));
1473 NewCtors[i] = ConstantStruct::get(EltTy, Ctor->getAggregateElement(0u),
1474 Ctor->getAggregateElement(1),
1475 Constant::getNullValue(IRB.getPtrTy()));
1476 }
1477 Constant *NewInit = ConstantArray::get(ArrayType::get(EltTy, N), NewCtors);
1478
1479 return new GlobalVariable(NewInit->getType(), false, GV->getLinkage(),
1480 NewInit, GV->getName());
1481}
1482
1483// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
1484// to byte shuffles.
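// For example, a 4-byte shift of a 128-bit vector roughly becomes:
//   %v = bitcast <2 x i64> %op to <16 x i8>
//   %r = shufflevector <16 x i8> zeroinitializer, <16 x i8> %v,
//        <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18,
//                    i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25,
//                    i32 26, i32 27>
//   %res = bitcast <16 x i8> %r to <2 x i64>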
1485static Value *upgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
1486 unsigned Shift) {
1487 auto *ResultTy = cast<FixedVectorType>(Op->getType());
1488 unsigned NumElts = ResultTy->getNumElements() * 8;
1489
1490 // Bitcast from a 64-bit element type to a byte element type.
1491 Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
1492 Op = Builder.CreateBitCast(Op, VecTy, "cast");
1493
1494 // We'll be shuffling in zeroes.
1495 Value *Res = Constant::getNullValue(VecTy);
1496
1497 // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
1498 // we'll just return the zero vector.
1499 if (Shift < 16) {
1500 int Idxs[64];
1501 // 256/512-bit version is split into 2/4 16-byte lanes.
1502 for (unsigned l = 0; l != NumElts; l += 16)
1503 for (unsigned i = 0; i != 16; ++i) {
1504 unsigned Idx = NumElts + i - Shift;
1505 if (Idx < NumElts)
1506 Idx -= NumElts - 16; // end of lane, switch operand.
1507 Idxs[l + i] = Idx + l;
1508 }
1509
1510 Res = Builder.CreateShuffleVector(Res, Op, ArrayRef(Idxs, NumElts));
1511 }
1512
1513 // Bitcast back to a 64-bit element type.
1514 return Builder.CreateBitCast(Res, ResultTy, "cast");
1515}
1516
1517// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
1518// to byte shuffles.
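// For example, a 4-byte shift of a 128-bit vector selects source bytes 4..15
// followed by four bytes taken from the zero vector, then bitcasts back to
// the original 64-bit element type.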
1519static Value *upgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
1520 unsigned Shift) {
1521 auto *ResultTy = cast<FixedVectorType>(Op->getType());
1522 unsigned NumElts = ResultTy->getNumElements() * 8;
1523
1524 // Bitcast from a 64-bit element type to a byte element type.
1525 Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
1526 Op = Builder.CreateBitCast(Op, VecTy, "cast");
1527
1528 // We'll be shuffling in zeroes.
1529 Value *Res = Constant::getNullValue(VecTy);
1530
1531 // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
1532 // we'll just return the zero vector.
1533 if (Shift < 16) {
1534 int Idxs[64];
1535 // 256/512-bit version is split into 2/4 16-byte lanes.
1536 for (unsigned l = 0; l != NumElts; l += 16)
1537 for (unsigned i = 0; i != 16; ++i) {
1538 unsigned Idx = i + Shift;
1539 if (Idx >= 16)
1540 Idx += NumElts - 16; // end of lane, switch operand.
1541 Idxs[l + i] = Idx + l;
1542 }
1543
1544 Res = Builder.CreateShuffleVector(Op, Res, ArrayRef(Idxs, NumElts));
1545 }
1546
1547 // Bitcast back to a 64-bit element type.
1548 return Builder.CreateBitCast(Res, ResultTy, "cast");
1549}
1550
1551static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
1552 unsigned NumElts) {
1553 assert(isPowerOf2_32(NumElts) && "Expected power-of-2 mask elements");
1554 llvm::VectorType *MaskTy = FixedVectorType::get(
1555 Builder.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
1556 Mask = Builder.CreateBitCast(Mask, MaskTy);
1557
1558 // If we have less than 8 elements (1, 2 or 4), then the starting mask was an
1559 // i8 and we need to extract down to the right number of elements.
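// For example, for a <4 x i32> operation the i8 mask is bitcast to <8 x i1>
// and elements 0..3 are extracted to form the <4 x i1> select condition.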
1560 if (NumElts <= 4) {
1561 int Indices[4];
1562 for (unsigned i = 0; i != NumElts; ++i)
1563 Indices[i] = i;
1564 Mask = Builder.CreateShuffleVector(Mask, Mask, ArrayRef(Indices, NumElts),
1565 "extract");
1566 }
1567
1568 return Mask;
1569}
1570
1571static Value *emitX86Select(IRBuilder<> &Builder, Value *Mask, Value *Op0,
1572 Value *Op1) {
1573 // If the mask is all ones just emit the first operation.
1574 if (const auto *C = dyn_cast<Constant>(Mask))
1575 if (C->isAllOnesValue())
1576 return Op0;
1577
1578 Mask = getX86MaskVec(Builder, Mask,
1579 cast<FixedVectorType>(Op0->getType())->getNumElements());
1580 return Builder.CreateSelect(Mask, Op0, Op1);
1581}
1582
1583static Value *emitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask, Value *Op0,
1584 Value *Op1) {
1585 // If the mask is all ones just emit the first operation.
1586 if (const auto *C = dyn_cast<Constant>(Mask))
1587 if (C->isAllOnesValue())
1588 return Op0;
1589
1590 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(),
1591 Mask->getType()->getIntegerBitWidth());
1592 Mask = Builder.CreateBitCast(Mask, MaskTy);
1593 Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
1594 return Builder.CreateSelect(Mask, Op0, Op1);
1595}
1596
1597// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
1598// PALIGNR handles large immediates by shifting while VALIGN masks the immediate
1599// so we need to handle both cases. VALIGN also doesn't have 128-bit lanes.
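// For example, a 256-bit palignr with an immediate of 20 is rewritten as a
// 4-byte shift with zeroes shifted in, while valign.d.512 reduces an
// immediate of 17 to 1 (17 & (NumElts - 1)) and shifts the concatenated
// source pair without any 128-bit lane splitting.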
1600static Value *upgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
1601 Value *Op1, Value *Shift,
1602 Value *Passthru, Value *Mask,
1603 bool IsVALIGN) {
1604 unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();
1605
1606 unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
1607 assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
1608 assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
1609 assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");
1610
1611 // Mask the immediate for VALIGN.
1612 if (IsVALIGN)
1613 ShiftVal &= (NumElts - 1);
1614
1615 // If palignr is shifting the pair of vectors more than the size of two
1616 // lanes, emit zero.
1617 if (ShiftVal >= 32)
1618 return llvm::Constant::getNullValue(Op0->getType());
1619
1620 // If palignr is shifting the pair of input vectors more than one lane,
1621 // but less than two lanes, convert to shifting in zeroes.
1622 if (ShiftVal > 16) {
1623 ShiftVal -= 16;
1624 Op1 = Op0;
1625 Op0 = llvm::Constant::getNullValue(Op0->getType());
1626 }
1627
1628 int Indices[64];
1629 // 256-bit palignr operates on 128-bit lanes so we need to handle that
1630 for (unsigned l = 0; l < NumElts; l += 16) {
1631 for (unsigned i = 0; i != 16; ++i) {
1632 unsigned Idx = ShiftVal + i;
1633 if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
1634 Idx += NumElts - 16; // End of lane, switch operand.
1635 Indices[l + i] = Idx + l;
1636 }
1637 }
1638
1639 Value *Align = Builder.CreateShuffleVector(
1640 Op1, Op0, ArrayRef(Indices, NumElts), "palignr");
1641
1642 return emitX86Select(Builder, Mask, Align, Passthru);
1643}
1644
1645static Value *upgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
1646 bool ZeroMask, bool IndexForm) {
1647 Type *Ty = CI.getType();
1648 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
1649 unsigned EltWidth = Ty->getScalarSizeInBits();
1650 bool IsFloat = Ty->isFPOrFPVectorTy();
1651 Intrinsic::ID IID;
1652 if (VecWidth == 128 && EltWidth == 32 && IsFloat)
1653 IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
1654 else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
1655 IID = Intrinsic::x86_avx512_vpermi2var_d_128;
1656 else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
1657 IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
1658 else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
1659 IID = Intrinsic::x86_avx512_vpermi2var_q_128;
1660 else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
1661 IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
1662 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
1663 IID = Intrinsic::x86_avx512_vpermi2var_d_256;
1664 else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
1665 IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
1666 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
1667 IID = Intrinsic::x86_avx512_vpermi2var_q_256;
1668 else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
1669 IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
1670 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
1671 IID = Intrinsic::x86_avx512_vpermi2var_d_512;
1672 else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
1673 IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
1674 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
1675 IID = Intrinsic::x86_avx512_vpermi2var_q_512;
1676 else if (VecWidth == 128 && EltWidth == 16)
1677 IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
1678 else if (VecWidth == 256 && EltWidth == 16)
1679 IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
1680 else if (VecWidth == 512 && EltWidth == 16)
1681 IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
1682 else if (VecWidth == 128 && EltWidth == 8)
1683 IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
1684 else if (VecWidth == 256 && EltWidth == 8)
1685 IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
1686 else if (VecWidth == 512 && EltWidth == 8)
1687 IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
1688 else
1689 llvm_unreachable("Unexpected intrinsic");
1690
1691 Value *Args[] = { CI.getArgOperand(0) , CI.getArgOperand(1),
1692 CI.getArgOperand(2) };
1693
1694 // If this isn't index form we need to swap operand 0 and 1.
1695 if (!IndexForm)
1696 std::swap(Args[0], Args[1]);
1697
1698 Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
1699 Args);
1700 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
1701 : Builder.CreateBitCast(CI.getArgOperand(1),
1702 Ty);
1703 return emitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
1704}
1705
1706static Value *upgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
1707 Intrinsic::ID IID) {
1708 Type *Ty = CI.getType();
1709 Value *Op0 = CI.getOperand(0);
1710 Value *Op1 = CI.getOperand(1);
1711 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1712 Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});
1713
1714 if (CI.arg_size() == 4) { // For masked intrinsics.
1715 Value *VecSrc = CI.getOperand(2);
1716 Value *Mask = CI.getOperand(3);
1717 Res = emitX86Select(Builder, Mask, Res, VecSrc);
1718 }
1719 return Res;
1720}
1721
1722static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
1723 bool IsRotateRight) {
1724 Type *Ty = CI.getType();
1725 Value *Src = CI.getArgOperand(0);
1726 Value *Amt = CI.getArgOperand(1);
1727
1728 // Amount may be scalar immediate, in which case create a splat vector.
1729 // Funnel shift amounts are treated as modulo and types are all power-of-2 so
1730 // we only care about the lowest log2 bits anyway.
1731 if (Amt->getType() != Ty) {
1732 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1733 Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
1734 Amt = Builder.CreateVectorSplat(NumElts, Amt);
1735 }
1736
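 // A rotate is a funnel shift of the source with itself, so this emits
 // llvm.fshl(Src, Src, Amt) for a left rotate and llvm.fshr for a right rotate.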
1737 Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
1738 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1739 Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});
1740
1741 if (CI.arg_size() == 4) { // For masked intrinsics.
1742 Value *VecSrc = CI.getOperand(2);
1743 Value *Mask = CI.getOperand(3);
1744 Res = emitX86Select(Builder, Mask, Res, VecSrc);
1745 }
1746 return Res;
1747}
1748
1749static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm,
1750 bool IsSigned) {
1751 Type *Ty = CI.getType();
1752 Value *LHS = CI.getArgOperand(0);
1753 Value *RHS = CI.getArgOperand(1);
1754
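 // The XOP vpcom immediate selects the predicate: 0 = lt, 1 = le, 2 = gt,
 // 3 = ge, 4 = eq, 5 = ne, 6 = always false, 7 = always true.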
1755 CmpInst::Predicate Pred;
1756 switch (Imm) {
1757 case 0x0:
1758 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1759 break;
1760 case 0x1:
1761 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
1762 break;
1763 case 0x2:
1764 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1765 break;
1766 case 0x3:
1767 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
1768 break;
1769 case 0x4:
1770 Pred = ICmpInst::ICMP_EQ;
1771 break;
1772 case 0x5:
1773 Pred = ICmpInst::ICMP_NE;
1774 break;
1775 case 0x6:
1776 return Constant::getNullValue(Ty); // FALSE
1777 case 0x7:
1778 return Constant::getAllOnesValue(Ty); // TRUE
1779 default:
1780 llvm_unreachable("Unknown XOP vpcom/vpcomu predicate");
1781 }
1782
1783 Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
1784 Value *Ext = Builder.CreateSExt(Cmp, Ty);
1785 return Ext;
1786}
1787
1788static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
1789 bool IsShiftRight, bool ZeroMask) {
1790 Type *Ty = CI.getType();
1791 Value *Op0 = CI.getArgOperand(0);
1792 Value *Op1 = CI.getArgOperand(1);
1793 Value *Amt = CI.getArgOperand(2);
1794
1795 if (IsShiftRight)
1796 std::swap(Op0, Op1);
1797
1798 // Amount may be scalar immediate, in which case create a splat vector.
1799 // Funnel shift amounts are treated as modulo and types are all power-of-2 so
1800 // we only care about the lowest log2 bits anyway.
1801 if (Amt->getType() != Ty) {
1802 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1803 Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
1804 Amt = Builder.CreateVectorSplat(NumElts, Amt);
1805 }
1806
1807 Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
1808 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
1809 Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});
1810
1811 unsigned NumArgs = CI.arg_size();
1812 if (NumArgs >= 4) { // For masked intrinsics.
1813 Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
1814 ZeroMask ? ConstantAggregateZero::get(CI.getType()) :
1815 CI.getArgOperand(0);
1816 Value *Mask = CI.getOperand(NumArgs - 1);
1817 Res = emitX86Select(Builder, Mask, Res, VecSrc);
1818 }
1819 return Res;
1820}
1821
1822static Value *upgradeMaskedStore(IRBuilder<> &Builder, Value *Ptr, Value *Data,
1823 Value *Mask, bool Aligned) {
1824 // Cast the pointer to the right type.
1825 Ptr = Builder.CreateBitCast(Ptr,
1826 llvm::PointerType::getUnqual(Data->getType()));
1827 const Align Alignment =
1828 Aligned
1829 ? Align(Data->getType()->getPrimitiveSizeInBits().getFixedValue() / 8)
1830 : Align(1);
1831
1832 // If the mask is all ones just emit a regular store.
1833 if (const auto *C = dyn_cast<Constant>(Mask))
1834 if (C->isAllOnesValue())
1835 return Builder.CreateAlignedStore(Data, Ptr, Alignment);
1836
1837 // Convert the mask from an integer type to a vector of i1.
1838 unsigned NumElts = cast<FixedVectorType>(Data->getType())->getNumElements();
1839 Mask = getX86MaskVec(Builder, Mask, NumElts);
1840 return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
1841}
1842
1843static Value *upgradeMaskedLoad(IRBuilder<> &Builder, Value *Ptr,
1844 Value *Passthru, Value *Mask, bool Aligned) {
1845 Type *ValTy = Passthru->getType();
1846 // Cast the pointer to the right type.
1847 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
1848 const Align Alignment =
1849 Aligned
1850 ? Align(
1851 Passthru->getType()->getPrimitiveSizeInBits().getFixedValue() /
1852 8)
1853 : Align(1);
1854
1855 // If the mask is all ones just emit a regular load.
1856 if (const auto *C = dyn_cast<Constant>(Mask))
1857 if (C->isAllOnesValue())
1858 return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);
1859
1860 // Convert the mask from an integer type to a vector of i1.
1861 unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
1862 Mask = getX86MaskVec(Builder, Mask, NumElts);
1863 return Builder.CreateMaskedLoad(ValTy, Ptr, Alignment, Mask, Passthru);
1864}
1865
1866static Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
1867 Type *Ty = CI.getType();
1868 Value *Op0 = CI.getArgOperand(0);
1869 Function *F = Intrinsic::getDeclaration(CI.getModule(), Intrinsic::abs, Ty);
1870 Value *Res = Builder.CreateCall(F, {Op0, Builder.getInt1(false)});
1871 if (CI.arg_size() == 3)
1872 Res = emitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
1873 return Res;
1874}
1875
1876static Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {
1877 Type *Ty = CI.getType();
1878
1879 // Arguments have a vXi32 type so cast to vXi64.
1880 Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
1881 Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);
1882
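 // PMULDQ/PMULUDQ multiply the low 32 bits of each 64-bit lane: shl+ashr
 // sign-extends those bits for the signed form, while masking with 0xffffffff
 // zero-extends them for the unsigned form.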
1883 if (IsSigned) {
1884 // Shift left then arithmetic shift right.
1885 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
1886 LHS = Builder.CreateShl(LHS, ShiftAmt);
1887 LHS = Builder.CreateAShr(LHS, ShiftAmt);
1888 RHS = Builder.CreateShl(RHS, ShiftAmt);
1889 RHS = Builder.CreateAShr(RHS, ShiftAmt);
1890 } else {
1891 // Clear the upper bits.
1892 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
1893 LHS = Builder.CreateAnd(LHS, Mask);
1894 RHS = Builder.CreateAnd(RHS, Mask);
1895 }
1896
1897 Value *Res = Builder.CreateMul(LHS, RHS);
1898
1899 if (CI.arg_size() == 4)
1900 Res = emitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
1901
1902 return Res;
1903}
1904
1905// Apply a mask to a vector of i1s and make sure the result is at least 8 bits wide.
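// For example, a <4 x i1> compare result is widened to <8 x i1> by appending
// four false elements from the null vector before the final bitcast to i8.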
1906static Value *applyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
1907 Value *Mask) {
1908 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1909 if (Mask) {
1910 const auto *C = dyn_cast<Constant>(Mask);
1911 if (!C || !C->isAllOnesValue())
1912 Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
1913 }
1914
1915 if (NumElts < 8) {
1916 int Indices[8];
1917 for (unsigned i = 0; i != NumElts; ++i)
1918 Indices[i] = i;
1919 for (unsigned i = NumElts; i != 8; ++i)
1920 Indices[i] = NumElts + i % NumElts;
1921 Vec = Builder.CreateShuffleVector(Vec,
1922 Constant::getNullValue(Vec->getType()),
1923 Indices);
1924 }
1925 return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
1926}
1927
1928static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
1929 unsigned CC, bool Signed) {
1930 Value *Op0 = CI.getArgOperand(0);
1931 unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
1932
1933 Value *Cmp;
1934 if (CC == 3) {
1935 Cmp = Constant::getNullValue(
1936 FixedVectorType::get(Builder.getInt1Ty(), NumElts));
1937 } else if (CC == 7) {
1938 Cmp = Constant::getAllOnesValue(
1939 FixedVectorType::get(Builder.getInt1Ty(), NumElts));
1940 } else {
1941 ICmpInst::Predicate Pred;
1942 switch (CC) {
1943 default: llvm_unreachable("Unknown condition code");
1944 case 0: Pred = ICmpInst::ICMP_EQ; break;
1945 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
1946 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
1947 case 4: Pred = ICmpInst::ICMP_NE; break;
1948 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
1949 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
1950 }
1951 Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
1952 }
1953
1954 Value *Mask = CI.getArgOperand(CI.arg_size() - 1);
1955
1956 return applyX86MaskOn1BitsVec(Builder, Cmp, Mask);
1957}
1958
1959// Replace a masked intrinsic with an older unmasked intrinsic.
1960static Value *upgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
1961 Intrinsic::ID IID) {
1962 Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
1963 Value *Rep = Builder.CreateCall(Intrin,
1964 { CI.getArgOperand(0), CI.getArgOperand(1) });
1965 return emitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
1966}
1967
1968static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
1969 Value* A = CI.getArgOperand(0);
1970 Value* B = CI.getArgOperand(1);
1971 Value* Src = CI.getArgOperand(2);
1972 Value* Mask = CI.getArgOperand(3);
1973
1974 Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
1975 Value* Cmp = Builder.CreateIsNotNull(AndNode);
1976 Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
1977 Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
1978 Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
1979 return Builder.CreateInsertElement(A, Select, (uint64_t)0);
1980}
1981
1982static Value* upgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
1983 Value* Op = CI.getArgOperand(0);
1984 Type* ReturnOp = CI.getType();
1985 unsigned NumElts = cast<FixedVectorType>(CI.getType())->getNumElements();
1986 Value *Mask = getX86MaskVec(Builder, Op, NumElts);
1987 return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
1988}
1989
1990// Replace intrinsic with unmasked version and a select.
1991static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
1992 CallBase &CI, Value *&Rep) {
1993 Name = Name.substr(12); // Remove avx512.mask.
1994
1995 unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
1996 unsigned EltWidth = CI.getType()->getScalarSizeInBits();
1997 Intrinsic::ID IID;
1998 if (Name.starts_with("max.p")) {
1999 if (VecWidth == 128 && EltWidth == 32)
2000 IID = Intrinsic::x86_sse_max_ps;
2001 else if (VecWidth == 128 && EltWidth == 64)
2002 IID = Intrinsic::x86_sse2_max_pd;
2003 else if (VecWidth == 256 && EltWidth == 32)
2004 IID = Intrinsic::x86_avx_max_ps_256;
2005 else if (VecWidth == 256 && EltWidth == 64)
2006 IID = Intrinsic::x86_avx_max_pd_256;
2007 else
2008 llvm_unreachable("Unexpected intrinsic");
2009 } else if (Name.starts_with("min.p")) {
2010 if (VecWidth == 128 && EltWidth == 32)
2011 IID = Intrinsic::x86_sse_min_ps;
2012 else if (VecWidth == 128 && EltWidth == 64)
2013 IID = Intrinsic::x86_sse2_min_pd;
2014 else if (VecWidth == 256 && EltWidth == 32)
2015 IID = Intrinsic::x86_avx_min_ps_256;
2016 else if (VecWidth == 256 && EltWidth == 64)
2017 IID = Intrinsic::x86_avx_min_pd_256;
2018 else
2019 llvm_unreachable("Unexpected intrinsic");
2020 } else if (Name.starts_with("pshuf.b.")) {
2021 if (VecWidth == 128)
2022 IID = Intrinsic::x86_ssse3_pshuf_b_128;
2023 else if (VecWidth == 256)
2024 IID = Intrinsic::x86_avx2_pshuf_b;
2025 else if (VecWidth == 512)
2026 IID = Intrinsic::x86_avx512_pshuf_b_512;
2027 else
2028 llvm_unreachable("Unexpected intrinsic");
2029 } else if (Name.starts_with("pmul.hr.sw.")) {
2030 if (VecWidth == 128)
2031 IID = Intrinsic::x86_ssse3_pmul_hr_sw_128;
2032 else if (VecWidth == 256)
2033 IID = Intrinsic::x86_avx2_pmul_hr_sw;
2034 else if (VecWidth == 512)
2035 IID = Intrinsic::x86_avx512_pmul_hr_sw_512;
2036 else
2037 llvm_unreachable("Unexpected intrinsic");
2038 } else if (Name.starts_with("pmulh.w.")) {
2039 if (VecWidth == 128)
2040 IID = Intrinsic::x86_sse2_pmulh_w;
2041 else if (VecWidth == 256)
2042 IID = Intrinsic::x86_avx2_pmulh_w;
2043 else if (VecWidth == 512)
2044 IID = Intrinsic::x86_avx512_pmulh_w_512;
2045 else
2046 llvm_unreachable("Unexpected intrinsic");
2047 } else if (Name.starts_with("pmulhu.w.")) {
2048 if (VecWidth == 128)
2049 IID = Intrinsic::x86_sse2_pmulhu_w;
2050 else if (VecWidth == 256)
2051 IID = Intrinsic::x86_avx2_pmulhu_w;
2052 else if (VecWidth == 512)
2053 IID = Intrinsic::x86_avx512_pmulhu_w_512;
2054 else
2055 llvm_unreachable("Unexpected intrinsic");
2056 } else if (Name.starts_with("pmaddw.d.")) {
2057 if (VecWidth == 128)
2058 IID = Intrinsic::x86_sse2_pmadd_wd;
2059 else if (VecWidth == 256)
2060 IID = Intrinsic::x86_avx2_pmadd_wd;
2061 else if (VecWidth == 512)
2062 IID = Intrinsic::x86_avx512_pmaddw_d_512;
2063 else
2064 llvm_unreachable("Unexpected intrinsic");
2065 } else if (Name.starts_with("pmaddubs.w.")) {
2066 if (VecWidth == 128)
2067 IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128;
2068 else if (VecWidth == 256)
2069 IID = Intrinsic::x86_avx2_pmadd_ub_sw;
2070 else if (VecWidth == 512)
2071 IID = Intrinsic::x86_avx512_pmaddubs_w_512;
2072 else
2073 llvm_unreachable("Unexpected intrinsic");
2074 } else if (Name.starts_with("packsswb.")) {
2075 if (VecWidth == 128)
2076 IID = Intrinsic::x86_sse2_packsswb_128;
2077 else if (VecWidth == 256)
2078 IID = Intrinsic::x86_avx2_packsswb;
2079 else if (VecWidth == 512)
2080 IID = Intrinsic::x86_avx512_packsswb_512;
2081 else
2082 llvm_unreachable("Unexpected intrinsic");
2083 } else if (Name.starts_with("packssdw.")) {
2084 if (VecWidth == 128)
2085 IID = Intrinsic::x86_sse2_packssdw_128;
2086 else if (VecWidth == 256)
2087 IID = Intrinsic::x86_avx2_packssdw;
2088 else if (VecWidth == 512)
2089 IID = Intrinsic::x86_avx512_packssdw_512;
2090 else
2091 llvm_unreachable("Unexpected intrinsic");
2092 } else if (Name.starts_with("packuswb.")) {
2093 if (VecWidth == 128)
2094 IID = Intrinsic::x86_sse2_packuswb_128;
2095 else if (VecWidth == 256)
2096 IID = Intrinsic::x86_avx2_packuswb;
2097 else if (VecWidth == 512)
2098 IID = Intrinsic::x86_avx512_packuswb_512;
2099 else
2100 llvm_unreachable("Unexpected intrinsic");
2101 } else if (Name.starts_with("packusdw.")) {
2102 if (VecWidth == 128)
2103 IID = Intrinsic::x86_sse41_packusdw;
2104 else if (VecWidth == 256)
2105 IID = Intrinsic::x86_avx2_packusdw;
2106 else if (VecWidth == 512)
2107 IID = Intrinsic::x86_avx512_packusdw_512;
2108 else
2109 llvm_unreachable("Unexpected intrinsic");
2110 } else if (Name.starts_with("vpermilvar.")) {
2111 if (VecWidth == 128 && EltWidth == 32)
2112 IID = Intrinsic::x86_avx_vpermilvar_ps;
2113 else if (VecWidth == 128 && EltWidth == 64)
2114 IID = Intrinsic::x86_avx_vpermilvar_pd;
2115 else if (VecWidth == 256 && EltWidth == 32)
2116 IID = Intrinsic::x86_avx_vpermilvar_ps_256;
2117 else if (VecWidth == 256 && EltWidth == 64)
2118 IID = Intrinsic::x86_avx_vpermilvar_pd_256;
2119 else if (VecWidth == 512 && EltWidth == 32)
2120 IID = Intrinsic::x86_avx512_vpermilvar_ps_512;
2121 else if (VecWidth == 512 && EltWidth == 64)
2122 IID = Intrinsic::x86_avx512_vpermilvar_pd_512;
2123 else
2124 llvm_unreachable("Unexpected intrinsic");
2125 } else if (Name == "cvtpd2dq.256") {
2126 IID = Intrinsic::x86_avx_cvt_pd2dq_256;
2127 } else if (Name == "cvtpd2ps.256") {
2128 IID = Intrinsic::x86_avx_cvt_pd2_ps_256;
2129 } else if (Name == "cvttpd2dq.256") {
2130 IID = Intrinsic::x86_avx_cvtt_pd2dq_256;
2131 } else if (Name == "cvttps2dq.128") {
2132 IID = Intrinsic::x86_sse2_cvttps2dq;
2133 } else if (Name == "cvttps2dq.256") {
2134 IID = Intrinsic::x86_avx_cvtt_ps2dq_256;
2135 } else if (Name.starts_with("permvar.")) {
2136 bool IsFloat = CI.getType()->isFPOrFPVectorTy();
2137 if (VecWidth == 256 && EltWidth == 32 && IsFloat)
2138 IID = Intrinsic::x86_avx2_permps;
2139 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
2140 IID = Intrinsic::x86_avx2_permd;
2141 else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
2142 IID = Intrinsic::x86_avx512_permvar_df_256;
2143 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
2144 IID = Intrinsic::x86_avx512_permvar_di_256;
2145 else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
2146 IID = Intrinsic::x86_avx512_permvar_sf_512;
2147 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
2148 IID = Intrinsic::x86_avx512_permvar_si_512;
2149 else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
2150 IID = Intrinsic::x86_avx512_permvar_df_512;
2151 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
2152 IID = Intrinsic::x86_avx512_permvar_di_512;
2153 else if (VecWidth == 128 && EltWidth == 16)
2154 IID = Intrinsic::x86_avx512_permvar_hi_128;
2155 else if (VecWidth == 256 && EltWidth == 16)
2156 IID = Intrinsic::x86_avx512_permvar_hi_256;
2157 else if (VecWidth == 512 && EltWidth == 16)
2158 IID = Intrinsic::x86_avx512_permvar_hi_512;
2159 else if (VecWidth == 128 && EltWidth == 8)
2160 IID = Intrinsic::x86_avx512_permvar_qi_128;
2161 else if (VecWidth == 256 && EltWidth == 8)
2162 IID = Intrinsic::x86_avx512_permvar_qi_256;
2163 else if (VecWidth == 512 && EltWidth == 8)
2164 IID = Intrinsic::x86_avx512_permvar_qi_512;
2165 else
2166 llvm_unreachable("Unexpected intrinsic");
2167 } else if (Name.starts_with("dbpsadbw.")) {
2168 if (VecWidth == 128)
2169 IID = Intrinsic::x86_avx512_dbpsadbw_128;
2170 else if (VecWidth == 256)
2171 IID = Intrinsic::x86_avx512_dbpsadbw_256;
2172 else if (VecWidth == 512)
2173 IID = Intrinsic::x86_avx512_dbpsadbw_512;
2174 else
2175 llvm_unreachable("Unexpected intrinsic");
2176 } else if (Name.starts_with("pmultishift.qb.")) {
2177 if (VecWidth == 128)
2178 IID = Intrinsic::x86_avx512_pmultishift_qb_128;
2179 else if (VecWidth == 256)
2180 IID = Intrinsic::x86_avx512_pmultishift_qb_256;
2181 else if (VecWidth == 512)
2182 IID = Intrinsic::x86_avx512_pmultishift_qb_512;
2183 else
2184 llvm_unreachable("Unexpected intrinsic");
2185 } else if (Name.starts_with("conflict.")) {
2186 if (Name[9] == 'd' && VecWidth == 128)
2187 IID = Intrinsic::x86_avx512_conflict_d_128;
2188 else if (Name[9] == 'd' && VecWidth == 256)
2189 IID = Intrinsic::x86_avx512_conflict_d_256;
2190 else if (Name[9] == 'd' && VecWidth == 512)
2191 IID = Intrinsic::x86_avx512_conflict_d_512;
2192 else if (Name[9] == 'q' && VecWidth == 128)
2193 IID = Intrinsic::x86_avx512_conflict_q_128;
2194 else if (Name[9] == 'q' && VecWidth == 256)
2195 IID = Intrinsic::x86_avx512_conflict_q_256;
2196 else if (Name[9] == 'q' && VecWidth == 512)
2197 IID = Intrinsic::x86_avx512_conflict_q_512;
2198 else
2199 llvm_unreachable("Unexpected intrinsic");
2200 } else if (Name.starts_with("pavg.")) {
2201 if (Name[5] == 'b' && VecWidth == 128)
2202 IID = Intrinsic::x86_sse2_pavg_b;
2203 else if (Name[5] == 'b' && VecWidth == 256)
2204 IID = Intrinsic::x86_avx2_pavg_b;
2205 else if (Name[5] == 'b' && VecWidth == 512)
2206 IID = Intrinsic::x86_avx512_pavg_b_512;
2207 else if (Name[5] == 'w' && VecWidth == 128)
2208 IID = Intrinsic::x86_sse2_pavg_w;
2209 else if (Name[5] == 'w' && VecWidth == 256)
2210 IID = Intrinsic::x86_avx2_pavg_w;
2211 else if (Name[5] == 'w' && VecWidth == 512)
2212 IID = Intrinsic::x86_avx512_pavg_w_512;
2213 else
2214 llvm_unreachable("Unexpected intrinsic");
2215 } else
2216 return false;
2217
2218 SmallVector<Value *, 4> Args(CI.args());
2219 Args.pop_back();
2220 Args.pop_back();
2221 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
2222 Args);
2223 unsigned NumArgs = CI.arg_size();
2224 Rep = emitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
2225 CI.getArgOperand(NumArgs - 2));
2226 return true;
2227}
2228
2229/// Upgrade the comment in a call to inline asm that represents an ObjC
2230/// retain/release marker.
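/// For example, the '#' that introduces the "# marker" comment is rewritten to
/// ';' so the marker survives as an assembler comment.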
2231void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
2232 size_t Pos;
2233 if (AsmStr->find("mov\tfp") == 0 &&
2234 AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos &&
2235 (Pos = AsmStr->find("# marker")) != std::string::npos) {
2236 AsmStr->replace(Pos, 1, ";");
2237 }
2238}
2239
2240static Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
2241 IRBuilder<> &Builder) {
2242 if (Name == "mve.vctp64.old") {
2243 // Replace the old v4i1 vctp64 with a v2i1 vctp and predicate-casts to the
2244 // correct type.
2245 Value *VCTP = Builder.CreateCall(
2246 Intrinsic::getDeclaration(F->getParent(), Intrinsic::arm_mve_vctp64),
2247 CI->getArgOperand(0), CI->getName());
2248 Value *C1 = Builder.CreateCall(
2249 Intrinsic::getDeclaration(
2250 F->getParent(), Intrinsic::arm_mve_pred_v2i,
2251 {VectorType::get(Builder.getInt1Ty(), 2, false)}),
2252 VCTP);
2253 return Builder.CreateCall(
2254 Intrinsic::getDeclaration(
2255 F->getParent(), Intrinsic::arm_mve_pred_i2v,
2256 {VectorType::get(Builder.getInt1Ty(), 4, false)}),
2257 C1);
2258 } else if (Name == "mve.mull.int.predicated.v2i64.v4i32.v4i1" ||
2259 Name == "mve.vqdmull.predicated.v2i64.v4i32.v4i1" ||
2260 Name == "mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" ||
2261 Name == "mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" ||
2262 Name ==
2263 "mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" ||
2264 Name == "mve.vldr.gather.offset.predicated.v2i64.p0.v2i64.v4i1" ||
2265 Name == "mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" ||
2266 Name == "mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" ||
2267 Name ==
2268 "mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" ||
2269 Name == "mve.vstr.scatter.offset.predicated.p0.v2i64.v2i64.v4i1" ||
2270 Name == "cde.vcx1q.predicated.v2i64.v4i1" ||
2271 Name == "cde.vcx1qa.predicated.v2i64.v4i1" ||
2272 Name == "cde.vcx2q.predicated.v2i64.v4i1" ||
2273 Name == "cde.vcx2qa.predicated.v2i64.v4i1" ||
2274 Name == "cde.vcx3q.predicated.v2i64.v4i1" ||
2275 Name == "cde.vcx3qa.predicated.v2i64.v4i1") {
2276 std::vector<Type *> Tys;
2277 unsigned ID = CI->getIntrinsicID();
2278 Type *V2I1Ty = FixedVectorType::get(Builder.getInt1Ty(), 2);
2279 switch (ID) {
2280 case Intrinsic::arm_mve_mull_int_predicated:
2281 case Intrinsic::arm_mve_vqdmull_predicated:
2282 case Intrinsic::arm_mve_vldr_gather_base_predicated:
2283 Tys = {CI->getType(), CI->getOperand(0)->getType(), V2I1Ty};
2284 break;
2285 case Intrinsic::arm_mve_vldr_gather_base_wb_predicated:
2286 case Intrinsic::arm_mve_vstr_scatter_base_predicated:
2287 case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated:
2288 Tys = {CI->getOperand(0)->getType(), CI->getOperand(0)->getType(),
2289 V2I1Ty};
2290 break;
2291 case Intrinsic::arm_mve_vldr_gather_offset_predicated:
2292 Tys = {CI->getType(), CI->getOperand(0)->getType(),
2293 CI->getOperand(1)->getType(), V2I1Ty};
2294 break;
2295 case Intrinsic::arm_mve_vstr_scatter_offset_predicated:
2296 Tys = {CI->getOperand(0)->getType(), CI->getOperand(1)->getType(),
2297 CI->getOperand(2)->getType(), V2I1Ty};
2298 break;
2299 case Intrinsic::arm_cde_vcx1q_predicated:
2300 case Intrinsic::arm_cde_vcx1qa_predicated:
2301 case Intrinsic::arm_cde_vcx2q_predicated:
2302 case Intrinsic::arm_cde_vcx2qa_predicated:
2303 case Intrinsic::arm_cde_vcx3q_predicated:
2304 case Intrinsic::arm_cde_vcx3qa_predicated:
2305 Tys = {CI->getOperand(1)->getType(), V2I1Ty};
2306 break;
2307 default:
2308 llvm_unreachable("Unhandled Intrinsic!");
2309 }
2310
2311 std::vector<Value *> Ops;
2312 for (Value *Op : CI->args()) {
2313 Type *Ty = Op->getType();
2314 if (Ty->getScalarSizeInBits() == 1) {
2315 Value *C1 = Builder.CreateCall(
2316 Intrinsic::getDeclaration(
2317 F->getParent(), Intrinsic::arm_mve_pred_v2i,
2318 {VectorType::get(Builder.getInt1Ty(), 4, false)}),
2319 Op);
2320 Op = Builder.CreateCall(
2321 Intrinsic::getDeclaration(F->getParent(),
2322 Intrinsic::arm_mve_pred_i2v, {V2I1Ty}),
2323 C1);
2324 }
2325 Ops.push_back(Op);
2326 }
2327
2328 Function *Fn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
2329 return Builder.CreateCall(Fn, Ops, CI->getName());
2330 }
2331 llvm_unreachable("Unknown function for ARM CallBase upgrade.");
2332}
2333
2334static Value *upgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI,
2335 Function *F, IRBuilder<> &Builder) {
2336 const bool IsInc = Name.starts_with("atomic.inc.");
2337 if (IsInc || Name.starts_with("atomic.dec.")) {
2338 if (CI->getNumOperands() != 6) // Malformed bitcode.
2339 return nullptr;
2340
2341 AtomicRMWInst::BinOp RMWOp =
2342 IsInc ? AtomicRMWInst::UIncWrap : AtomicRMWInst::UDecWrap;
2343
2344 Value *Ptr = CI->getArgOperand(0);
2345 Value *Val = CI->getArgOperand(1);
2346 ConstantInt *OrderArg = dyn_cast<ConstantInt>(CI->getArgOperand(2));
2347 ConstantInt *VolatileArg = dyn_cast<ConstantInt>(CI->getArgOperand(4));
2348
2349 AtomicOrdering Order = AtomicOrdering::SequentiallyConsistent;
2350 if (OrderArg && isValidAtomicOrdering(OrderArg->getZExtValue()))
2351 Order = static_cast<AtomicOrdering>(OrderArg->getZExtValue());
2352 if (Order == AtomicOrdering::NotAtomic ||
2353 Order == AtomicOrdering::Unordered)
2354 Order = AtomicOrdering::SequentiallyConsistent;
2355
2356 // The scope argument never really worked correctly. Use agent as the most
2357 // conservative option which should still always produce the instruction.
2358 SyncScope::ID SSID = F->getContext().getOrInsertSyncScopeID("agent");
2359 AtomicRMWInst *RMW =
2360 Builder.CreateAtomicRMW(RMWOp, Ptr, Val, std::nullopt, Order, SSID);
2361
2362 if (!VolatileArg || !VolatileArg->isZero())
2363 RMW->setVolatile(true);
2364 return RMW;
2365 }
2366
2367 llvm_unreachable("Unknown function for AMDGPU intrinsic upgrade.");
2368}
2369
2370/// Helper to unwrap intrinsic call MetadataAsValue operands.
2371template <typename MDType>
2372static MDType *unwrapMAVOp(CallBase *CI, unsigned Op) {
2373 if (MetadataAsValue *MAV = dyn_cast<MetadataAsValue>(CI->getArgOperand(Op)))
2374 return dyn_cast<MDType>(MAV->getMetadata());
2375 return nullptr;
2376}
2377
2378/// Convert debug intrinsic calls to non-instruction debug records.
2379/// \p Name - Final part of the intrinsic name, e.g. 'value' in llvm.dbg.value.
2380/// \p CI - The debug intrinsic call.
2381static void upgradeDbgIntrinsicToDbgRecord(StringRef Name, CallBase *CI) {
2382 DbgRecord *DR = nullptr;
2383 if (Name == "label") {
2384 DR = new DbgLabelRecord(unwrapMAVOp<DILabel>(CI, 0), CI->getDebugLoc());
2385 } else if (Name == "assign") {
2386 DR = new DbgVariableRecord(
2387 unwrapMAVOp<Metadata>(CI, 0), unwrapMAVOp<DILocalVariable>(CI, 1),
2388 unwrapMAVOp<DIExpression>(CI, 2), unwrapMAVOp<DIAssignID>(CI, 3),
2389 unwrapMAVOp<Metadata>(CI, 4), unwrapMAVOp<DIExpression>(CI, 5),
2390 CI->getDebugLoc());
2391 } else if (Name == "declare") {
2392 DR = new DbgVariableRecord(
2393 unwrapMAVOp<Metadata>(CI, 0), unwrapMAVOp<DILocalVariable>(CI, 1),
2394 unwrapMAVOp<DIExpression>(CI, 2), CI->getDebugLoc(),
2395 DbgVariableRecord::LocationType::Declare);
2396 } else if (Name == "addr") {
2397 // Upgrade dbg.addr to dbg.value with DW_OP_deref.
2398 DIExpression *Expr = unwrapMAVOp<DIExpression>(CI, 2);
2399 Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
2400 DR = new DbgVariableRecord(unwrapMAVOp<Metadata>(CI, 0),
2401 unwrapMAVOp<DILocalVariable>(CI, 1), Expr,
2402 CI->getDebugLoc());
2403 } else if (Name == "value") {
2404 // An old version of dbg.value had an extra offset argument.
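 // The four-argument legacy form carried (value, i64 offset, variable,
 // expression); only zero-offset uses are migrated, anything else is dropped.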
2405 unsigned VarOp = 1;
2406 unsigned ExprOp = 2;
2407 if (CI->arg_size() == 4) {
2408 auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1));
2409 // Nonzero offset dbg.values get dropped without a replacement.
2410 if (!Offset || !Offset->isZeroValue())
2411 return;
2412 VarOp = 2;
2413 ExprOp = 3;
2414 }
2415 DR = new DbgVariableRecord(
2416 unwrapMAVOp<Metadata>(CI, 0), unwrapMAVOp<DILocalVariable>(CI, VarOp),
2417 unwrapMAVOp<DIExpression>(CI, ExprOp), CI->getDebugLoc());
2418 }
2419 assert(DR && "Unhandled intrinsic kind in upgrade to DbgRecord");
2420 CI->getParent()->insertDbgRecordBefore(DR, CI->getIterator());
2421}
2422
2423/// Upgrade a call to an old intrinsic. All argument and return casting must be
2424/// provided to seamlessly integrate with existing context.
2425void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
2426 // Note dyn_cast to Function is not quite the same as getCalledFunction, which
2427 // checks the callee's function type matches. It's likely we need to handle
2428 // type changes here.
2429 Function *F = dyn_cast<Function>(CI->getCalledOperand());
2430 if (!F)
2431 return;
2432
2433 LLVMContext &C = CI->getContext();
2434 IRBuilder<> Builder(C);
2435 Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
2436
2437 if (!NewFn) {
2438 bool FallthroughToDefaultUpgrade = false;
2439 // Get the Function's name.
2440 StringRef Name = F->getName();
2441
2442 assert(Name.starts_with("llvm.") && "Intrinsic doesn't start with 'llvm.'");
2443 Name = Name.substr(5);
2444
2445 bool IsX86 = Name.consume_front("x86.");
2446 bool IsNVVM = Name.consume_front("nvvm.");
2447 bool IsARM = Name.consume_front("arm.");
2448 bool IsAMDGCN = Name.consume_front("amdgcn.");
2449 bool IsDbg = Name.consume_front("dbg.");
2450
2451 if (IsX86 && Name.starts_with("sse4a.movnt.")) {
2452 SmallVector<Metadata *, 1> Elts;
2453 Elts.push_back(
2454 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
2455 MDNode *Node = MDNode::get(C, Elts);
2456
2457 Value *Arg0 = CI->getArgOperand(0);
2458 Value *Arg1 = CI->getArgOperand(1);
2459
2460 // Nontemporal (unaligned) store of the 0'th element of the float/double
2461 // vector.
2462 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType();
2463 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy);
2464 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast");
2465 Value *Extract =
2466 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");
2467
2468 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, Align(1));
2469 SI->setMetadata(LLVMContext::MD_nontemporal, Node);
2470
2471 // Remove intrinsic.
2472 CI->eraseFromParent();
2473 return;
2474 }
2475
2476 if (IsX86 && (Name.starts_with("avx.movnt.") ||
2477 Name.starts_with("avx512.storent."))) {
2478 SmallVector<Metadata *, 1> Elts;
2479 Elts.push_back(
2480 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
2481 MDNode *Node = MDNode::get(C, Elts);
2482
2483 Value *Arg0 = CI->getArgOperand(0);
2484 Value *Arg1 = CI->getArgOperand(1);
2485
2486 // Convert the type of the pointer to a pointer to the stored type.
2487 Value *BC = Builder.CreateBitCast(Arg0,
2488 PointerType::getUnqual(Arg1->getType()),
2489 "cast");
2490 StoreInst *SI = Builder.CreateAlignedStore(
2491 Arg1, BC,
2492 Align(Arg1->getType()->getPrimitiveSizeInBits().getFixedValue() / 8));
2493 SI->setMetadata(LLVMContext::MD_nontemporal, Node);
2494
2495 // Remove intrinsic.
2496 CI->eraseFromParent();
2497 return;
2498 }
2499
2500 if (IsX86 && Name == "sse2.storel.dq") {
2501 Value *Arg0 = CI->getArgOperand(0);
2502 Value *Arg1 = CI->getArgOperand(1);
2503
2504 auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
2505 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
2506 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
2507 Value *BC = Builder.CreateBitCast(Arg0,
2508 PointerType::getUnqual(Elt->getType()),
2509 "cast");
2510 Builder.CreateAlignedStore(Elt, BC, Align(1));
2511
2512 // Remove intrinsic.
2513 CI->eraseFromParent();
2514 return;
2515 }
2516
2517 if (IsX86 && (Name.starts_with("sse.storeu.") ||
2518 Name.starts_with("sse2.storeu.") ||
2519 Name.starts_with("avx.storeu."))) {
2520 Value *Arg0 = CI->getArgOperand(0);
2521 Value *Arg1 = CI->getArgOperand(1);
2522
2523 Arg0 = Builder.CreateBitCast(Arg0,
2524 PointerType::getUnqual(Arg1->getType()),
2525 "cast");
2526 Builder.CreateAlignedStore(Arg1, Arg0, Align(1));
2527
2528 // Remove intrinsic.
2529 CI->eraseFromParent();
2530 return;
2531 }
2532
2533 if (IsX86 && Name == "avx512.mask.store.ss") {
2534 Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1));
2535 upgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2536 Mask, false);
2537
2538 // Remove intrinsic.
2539 CI->eraseFromParent();
2540 return;
2541 }
2542
2543 if (IsX86 && Name.starts_with("avx512.mask.store")) {
2544 // "avx512.mask.storeu." or "avx512.mask.store."
2545 bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
2546 upgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2547 CI->getArgOperand(2), Aligned);
2548
2549 // Remove intrinsic.
2550 CI->eraseFromParent();
2551 return;
2552 }
2553
2554 Value *Rep = nullptr;
2555 // Upgrade packed integer vector compare intrinsics to compare instructions.
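 // For example, llvm.x86.sse2.pcmpeq.b becomes an "icmp eq" on the two
 // operands followed by a sign extension back to the original vector type.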
2556 if (IsX86 && (Name.starts_with("sse2.pcmp") ||
2557 Name.starts_with("avx2.pcmp"))) {
2558 // "sse2.pcmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt."
2559 bool CmpEq = Name[9] == 'e';
2560 Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
2561 CI->getArgOperand(0), CI->getArgOperand(1));
2562 Rep = Builder.CreateSExt(Rep, CI->getType(), "");
2563 } else if (IsX86 && (Name.starts_with("avx512.broadcastm"))) {
2564 Type *ExtTy = Type::getInt32Ty(C);
2565 if (CI->getOperand(0)->getType()->isIntegerTy(8))
2566 ExtTy = Type::getInt64Ty(C);
2567 unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
2568 ExtTy->getPrimitiveSizeInBits();
2569 Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
2570 Rep = Builder.CreateVectorSplat(NumElts, Rep);
2571 } else if (IsX86 && (Name == "sse.sqrt.ss" ||
2572 Name == "sse2.sqrt.sd")) {
2573 Value *Vec = CI->getArgOperand(0);
2574 Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
2575 Function *Intr = Intrinsic::getDeclaration(F->getParent(),
2576 Intrinsic::sqrt, Elt0->getType());
2577 Elt0 = Builder.CreateCall(Intr, Elt0);
2578 Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
2579 } else if (IsX86 && (Name.starts_with("avx.sqrt.p") ||
2580 Name.starts_with("sse2.sqrt.p") ||
2581 Name.starts_with("sse.sqrt.p"))) {
2582 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2583 Intrinsic::sqrt,
2584 CI->getType()),
2585 {CI->getArgOperand(0)});
2586 } else if (IsX86 && (Name.starts_with("avx512.mask.sqrt.p"))) {
2587 if (CI->arg_size() == 4 &&
2588 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
2589 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
2590 Intrinsic::ID IID = Name[18] == 's' ? Intrinsic::x86_avx512_sqrt_ps_512
2591 : Intrinsic::x86_avx512_sqrt_pd_512;
2592
2593 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) };
2594 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2595 IID), Args);
2596 } else {
2597 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
2598 Intrinsic::sqrt,
2599 CI->getType()),
2600 {CI->getArgOperand(0)});
2601 }
2602 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
2603 CI->getArgOperand(1));
2604 } else if (IsX86 && (Name.starts_with("avx512.ptestm") ||
2605 Name.starts_with("avx512.ptestnm"))) {
2606 Value *Op0 = CI->getArgOperand(0);
2607 Value *Op1 = CI->getArgOperand(1);
2608 Value *Mask = CI->getArgOperand(2);
2609 Rep = Builder.CreateAnd(Op0, Op1);
2610 llvm::Type *Ty = Op0->getType();
2611 Value *Zero = llvm::Constant::getNullValue(Ty);
2612 ICmpInst::Predicate Pred =
2613 Name.starts_with("avx512.ptestm") ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
2614 Rep = Builder.CreateICmp(Pred, Rep, Zero);
2615 Rep = applyX86MaskOn1BitsVec(Builder, Rep, Mask);
2616 } else if (IsX86 && (Name.starts_with("avx512.mask.pbroadcast"))){
2617 unsigned NumElts = cast<FixedVectorType>(CI->getArgOperand(1)->getType())
2618 ->getNumElements();
2619 Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
2620 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
2621 CI->getArgOperand(1));
2622 } else if (IsX86 && (Name.starts_with("avx512.kunpck"))) {
2623 unsigned NumElts = CI->getType()->getScalarSizeInBits();
2624 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts);
2625 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts);
2626 int Indices[64];
2627 for (unsigned i = 0; i != NumElts; ++i)
2628 Indices[i] = i;
2629
2630 // First extract half of each vector. This gives better codegen than
2631 // doing it in a single shuffle.
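 // e.g. avx512.kunpck.bw places the low 8 bits of the second i16 operand in
 // bits 0..7 of the result and the low 8 bits of the first operand in bits 8..15.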
2632 LHS =
2633 Builder.CreateShuffleVector(LHS, LHS, ArrayRef(Indices, NumElts / 2));
2634 RHS =
2635 Builder.CreateShuffleVector(RHS, RHS, ArrayRef(Indices, NumElts / 2));
2636 // Concat the vectors.
2637 // NOTE: Operands have to be swapped to match intrinsic definition.
2638 Rep = Builder.CreateShuffleVector(RHS, LHS, ArrayRef(Indices, NumElts));
2639 Rep = Builder.CreateBitCast(Rep, CI->getType());
2640 } else if (IsX86 && Name == "avx512.kand.w") {
2641 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2642 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2643 Rep = Builder.CreateAnd(LHS, RHS);
2644 Rep = Builder.CreateBitCast(Rep, CI->getType());
2645 } else if (IsX86 && Name == "avx512.kandn.w") {
2646 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2647 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2648 LHS = Builder.CreateNot(LHS);
2649 Rep = Builder.CreateAnd(LHS, RHS);
2650 Rep = Builder.CreateBitCast(Rep, CI->getType());
2651 } else if (IsX86 && Name == "avx512.kor.w") {
2652 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2653 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2654 Rep = Builder.CreateOr(LHS, RHS);
2655 Rep = Builder.CreateBitCast(Rep, CI->getType());
2656 } else if (IsX86 && Name == "avx512.kxor.w") {
2657 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2658 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2659 Rep = Builder.CreateXor(LHS, RHS);
2660 Rep = Builder.CreateBitCast(Rep, CI->getType());
2661 } else if (IsX86 && Name == "avx512.kxnor.w") {
2662 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2663 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2664 LHS = Builder.CreateNot(LHS);
2665 Rep = Builder.CreateXor(LHS, RHS);
2666 Rep = Builder.CreateBitCast(Rep, CI->getType());
2667 } else if (IsX86 && Name == "avx512.knot.w") {
2668 Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2669 Rep = Builder.CreateNot(Rep);
2670 Rep = Builder.CreateBitCast(Rep, CI->getType());
2671 } else if (IsX86 &&
2672 (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) {
2673 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
2674 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
2675 Rep = Builder.CreateOr(LHS, RHS);
2676 Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty());
2677 Value *C;
2678 if (Name[14] == 'c')
2679 C = ConstantInt::getAllOnesValue(Builder.getInt16Ty());
2680 else
2681 C = ConstantInt::getNullValue(Builder.getInt16Ty());
2682 Rep = Builder.CreateICmpEQ(Rep, C);
2683 Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty());
2684 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" ||
2685 Name == "sse.sub.ss" || Name == "sse2.sub.sd" ||
2686 Name == "sse.mul.ss" || Name == "sse2.mul.sd" ||
2687 Name == "sse.div.ss" || Name == "sse2.div.sd")) {
2688 Type *I32Ty = Type::getInt32Ty(C);
2689 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
2690 ConstantInt::get(I32Ty, 0));
2691 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
2692 ConstantInt::get(I32Ty, 0));
2693 Value *EltOp;
2694 if (Name.contains(".add."))
2695 EltOp = Builder.CreateFAdd(Elt0, Elt1);
2696 else if (Name.contains(".sub."))
2697 EltOp = Builder.CreateFSub(Elt0, Elt1);
2698 else if (Name.contains(".mul."))
2699 EltOp = Builder.CreateFMul(Elt0, Elt1);
2700 else
2701 EltOp = Builder.CreateFDiv(Elt0, Elt1);
2702 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp,
2703 ConstantInt::get(I32Ty, 0));
2704 } else if (IsX86 && Name.starts_with("avx512.mask.pcmp")) {
2705 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt."
2706 bool CmpEq = Name[16] == 'e';
2707 Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true);
2708 } else if (IsX86 && Name.starts_with("avx512.mask.vpshufbitqmb.")) {
2709 Type *OpTy = CI->getArgOperand(0)->getType();
2710 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2711 Intrinsic::ID IID;
2712 switch (VecWidth) {
2713 default: llvm_unreachable("Unexpected intrinsic");
2714 case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break;
2715 case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break;
2716 case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break;
2717 }
2718
2719 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2720 { CI->getOperand(0), CI->getArgOperand(1) });
2721 Rep = applyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
2722 } else if (IsX86 && Name.starts_with("avx512.mask.fpclass.p")) {
2723 Type *OpTy = CI->getArgOperand(0)->getType();
2724 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2725 unsigned EltWidth = OpTy->getScalarSizeInBits();
2726 Intrinsic::ID IID;
2727 if (VecWidth == 128 && EltWidth == 32)
2728 IID = Intrinsic::x86_avx512_fpclass_ps_128;
2729 else if (VecWidth == 256 && EltWidth == 32)
2730 IID = Intrinsic::x86_avx512_fpclass_ps_256;
2731 else if (VecWidth == 512 && EltWidth == 32)
2732 IID = Intrinsic::x86_avx512_fpclass_ps_512;
2733 else if (VecWidth == 128 && EltWidth == 64)
2734 IID = Intrinsic::x86_avx512_fpclass_pd_128;
2735 else if (VecWidth == 256 && EltWidth == 64)
2736 IID = Intrinsic::x86_avx512_fpclass_pd_256;
2737 else if (VecWidth == 512 && EltWidth == 64)
2738 IID = Intrinsic::x86_avx512_fpclass_pd_512;
2739 else
2740 llvm_unreachable("Unexpected intrinsic");
2741
2742 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2743 { CI->getOperand(0), CI->getArgOperand(1) });
2744 Rep = applyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
2745 } else if (IsX86 && Name.starts_with("avx512.cmp.p")) {
2746 SmallVector<Value *, 4> Args(CI->args());
2747 Type *OpTy = Args[0]->getType();
2748 unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
2749 unsigned EltWidth = OpTy->getScalarSizeInBits();
2750 Intrinsic::ID IID;
2751 if (VecWidth == 128 && EltWidth == 32)
2752 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
2753 else if (VecWidth == 256 && EltWidth == 32)
2754 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
2755 else if (VecWidth == 512 && EltWidth == 32)
2756 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
2757 else if (VecWidth == 128 && EltWidth == 64)
2758 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
2759 else if (VecWidth == 256 && EltWidth == 64)
2760 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
2761 else if (VecWidth == 512 && EltWidth == 64)
2762 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
2763 else
2764 llvm_unreachable("Unexpected intrinsic");
2765
2766 Value *Mask = Constant::getAllOnesValue(CI->getType());
2767 if (VecWidth == 512)
2768 std::swap(Mask, Args.back());
2769 Args.push_back(Mask);
2770
2771 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2772 Args);
2773 } else if (IsX86 && Name.starts_with("avx512.mask.cmp.")) {
2774 // Integer compare intrinsics.
2775 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2776 Rep = upgradeMaskedCompare(Builder, *CI, Imm, true);
2777 } else if (IsX86 && Name.starts_with("avx512.mask.ucmp.")) {
2778 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2779 Rep = upgradeMaskedCompare(Builder, *CI, Imm, false);
2780 } else if (IsX86 && (Name.starts_with("avx512.cvtb2mask.") ||
2781 Name.starts_with("avx512.cvtw2mask.") ||
2782 Name.starts_with("avx512.cvtd2mask.") ||
2783 Name.starts_with("avx512.cvtq2mask."))) {
2784 Value *Op = CI->getArgOperand(0);
2785 Value *Zero = llvm::Constant::getNullValue(Op->getType());
2786 Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero);
2787 Rep = applyX86MaskOn1BitsVec(Builder, Rep, nullptr);
2788 } else if(IsX86 && (Name == "ssse3.pabs.b.128" ||
2789 Name == "ssse3.pabs.w.128" ||
2790 Name == "ssse3.pabs.d.128" ||
2791 Name.starts_with("avx2.pabs") ||
2792 Name.starts_with("avx512.mask.pabs"))) {
2793 Rep = upgradeAbs(Builder, *CI);
2794 } else if (IsX86 && (Name == "sse41.pmaxsb" ||
2795 Name == "sse2.pmaxs.w" ||
2796 Name == "sse41.pmaxsd" ||
2797 Name.starts_with("avx2.pmaxs") ||
2798 Name.starts_with("avx512.mask.pmaxs"))) {
2799 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smax);
2800 } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
2801 Name == "sse41.pmaxuw" ||
2802 Name == "sse41.pmaxud" ||
2803 Name.starts_with("avx2.pmaxu") ||
2804 Name.starts_with("avx512.mask.pmaxu"))) {
2805 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umax);
2806 } else if (IsX86 && (Name == "sse41.pminsb" ||
2807 Name == "sse2.pmins.w" ||
2808 Name == "sse41.pminsd" ||
2809 Name.starts_with("avx2.pmins") ||
2810 Name.starts_with("avx512.mask.pmins"))) {
2811 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smin);
2812 } else if (IsX86 && (Name == "sse2.pminu.b" ||
2813 Name == "sse41.pminuw" ||
2814 Name == "sse41.pminud" ||
2815 Name.starts_with("avx2.pminu") ||
2816 Name.starts_with("avx512.mask.pminu"))) {
2817 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umin);
2818 } else if (IsX86 && (Name == "sse2.pmulu.dq" ||
2819 Name == "avx2.pmulu.dq" ||
2820 Name == "avx512.pmulu.dq.512" ||
2821 Name.starts_with("avx512.mask.pmulu.dq."))) {
2822 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false);
2823 } else if (IsX86 && (Name == "sse41.pmuldq" ||
2824 Name == "avx2.pmul.dq" ||
2825 Name == "avx512.pmul.dq.512" ||
2826 Name.starts_with("avx512.mask.pmul.dq."))) {
2827 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true);
2828 } else if (IsX86 && (Name == "sse.cvtsi2ss" ||
2829 Name == "sse2.cvtsi2sd" ||
2830 Name == "sse.cvtsi642ss" ||
2831 Name == "sse2.cvtsi642sd")) {
2832 Rep = Builder.CreateSIToFP(
2833 CI->getArgOperand(1),
2834 cast<VectorType>(CI->getType())->getElementType());
2835 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2836 } else if (IsX86 && Name == "avx512.cvtusi2sd") {
2837 Rep = Builder.CreateUIToFP(
2838 CI->getArgOperand(1),
2839 cast<VectorType>(CI->getType())->getElementType());
2840 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2841 } else if (IsX86 && Name == "sse2.cvtss2sd") {
2842 Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0);
2843 Rep = Builder.CreateFPExt(
2844 Rep, cast<VectorType>(CI->getType())->getElementType());
2845 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
2846 } else if (IsX86 && (Name == "sse2.cvtdq2pd" ||
2847 Name == "sse2.cvtdq2ps" ||
2848 Name == "avx.cvtdq2.pd.256" ||
2849 Name == "avx.cvtdq2.ps.256" ||
2850 Name.starts_with("avx512.mask.cvtdq2pd.") ||
2851 Name.starts_with("avx512.mask.cvtudq2pd.") ||
2852 Name.starts_with("avx512.mask.cvtdq2ps.") ||
2853 Name.starts_with("avx512.mask.cvtudq2ps.") ||
2854 Name.starts_with("avx512.mask.cvtqq2pd.") ||
2855 Name.starts_with("avx512.mask.cvtuqq2pd.") ||
2856 Name == "avx512.mask.cvtqq2ps.256" ||
2857 Name == "avx512.mask.cvtqq2ps.512" ||
2858 Name == "avx512.mask.cvtuqq2ps.256" ||
2859 Name == "avx512.mask.cvtuqq2ps.512" ||
2860 Name == "sse2.cvtps2pd" ||
2861 Name == "avx.cvt.ps2.pd.256" ||
2862 Name == "avx512.mask.cvtps2pd.128" ||
2863 Name == "avx512.mask.cvtps2pd.256")) {
2864 auto *DstTy = cast<FixedVectorType>(CI->getType());
2865 Rep = CI->getArgOperand(0);
2866 auto *SrcTy = cast<FixedVectorType>(Rep->getType());
2867
2868 unsigned NumDstElts = DstTy->getNumElements();
2869 if (NumDstElts < SrcTy->getNumElements()) {
2870 assert(NumDstElts == 2 && "Unexpected vector size");
2871 Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1});
2872 }
2873
2874 bool IsPS2PD = SrcTy->getElementType()->isFloatTy();
2875 bool IsUnsigned = Name.contains("cvtu");
2876 if (IsPS2PD)
2877 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
2878 else if (CI->arg_size() == 4 &&
2879 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
2880 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
2881 Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round
2882 : Intrinsic::x86_avx512_sitofp_round;
2883 Function *F = Intrinsic::getDeclaration(CI->getModule(), IID,
2884 { DstTy, SrcTy });
2885 Rep = Builder.CreateCall(F, { Rep, CI->getArgOperand(3) });
2886 } else {
2887 Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt")
2888 : Builder.CreateSIToFP(Rep, DstTy, "cvt");
2889 }
2890
2891 if (CI->arg_size() >= 3)
2892 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
2893 CI->getArgOperand(1));
2894 } else if (IsX86 && (Name.starts_with("avx512.mask.vcvtph2ps.") ||
2895 Name.starts_with("vcvtph2ps."))) {
2896 auto *DstTy = cast<FixedVectorType>(CI->getType());
2897 Rep = CI->getArgOperand(0);
2898 auto *SrcTy = cast<FixedVectorType>(Rep->getType());
2899 unsigned NumDstElts = DstTy->getNumElements();
2900 if (NumDstElts != SrcTy->getNumElements()) {
2901 assert(NumDstElts == 4 && "Unexpected vector size");
2902 Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1, 2, 3});
2903 }
2904 Rep = Builder.CreateBitCast(
2905 Rep, FixedVectorType::get(Type::getHalfTy(C), NumDstElts));
2906 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtph2ps");
2907 if (CI->arg_size() >= 3)
2908 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
2909 CI->getArgOperand(1));
2910 } else if (IsX86 && Name.starts_with("avx512.mask.load")) {
2911 // "avx512.mask.loadu." or "avx512.mask.load."
2912 bool Aligned = Name[16] != 'u'; // "avx512.mask.loadu".
2913 Rep =
2914 upgradeMaskedLoad(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
2915 CI->getArgOperand(2), Aligned);
2916 } else if (IsX86 && Name.starts_with("avx512.mask.expand.load.")) {
2917 auto *ResultTy = cast<FixedVectorType>(CI->getType());
2918 Type *PtrTy = ResultTy->getElementType();
2919
2920 // Cast the pointer to element type.
2921 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
2922 llvm::PointerType::getUnqual(PtrTy));
2923
2924 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
2925 ResultTy->getNumElements());
2926
2927 Function *ELd = Intrinsic::getDeclaration(F->getParent(),
2928 Intrinsic::masked_expandload,
2929 ResultTy);
2930 Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) });
2931 } else if (IsX86 && Name.starts_with("avx512.mask.compress.store.")) {
2932 auto *ResultTy = cast<VectorType>(CI->getArgOperand(1)->getType());
2933 Type *PtrTy = ResultTy->getElementType();
2934
2935 // Cast the pointer to element type.
2936 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
2937 llvm::PointerType::getUnqual(PtrTy));
2938
2939 Value *MaskVec =
2940 getX86MaskVec(Builder, CI->getArgOperand(2),
2941 cast<FixedVectorType>(ResultTy)->getNumElements());
2942
2943 Function *CSt = Intrinsic::getDeclaration(F->getParent(),
2944 Intrinsic::masked_compressstore,
2945 ResultTy);
2946 Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec });
2947 } else if (IsX86 && (Name.starts_with("avx512.mask.compress.") ||
2948 Name.starts_with("avx512.mask.expand."))) {
2949 auto *ResultTy = cast<FixedVectorType>(CI->getType());
2950
2951 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
2952 ResultTy->getNumElements());
2953
2954 bool IsCompress = Name[12] == 'c';
2955 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
2956 : Intrinsic::x86_avx512_mask_expand;
2957 Function *Intr = Intrinsic::getDeclaration(F->getParent(), IID, ResultTy);
2958 Rep = Builder.CreateCall(Intr, { CI->getOperand(0), CI->getOperand(1),
2959 MaskVec });
2960 } else if (IsX86 && Name.starts_with("xop.vpcom")) {
2961 bool IsSigned;
2962 if (Name.ends_with("ub") || Name.ends_with("uw") || Name.ends_with("ud") ||
2963 Name.ends_with("uq"))
2964 IsSigned = false;
2965 else if (Name.ends_with("b") || Name.ends_with("w") || Name.ends_with("d") ||
2966 Name.ends_with("q"))
2967 IsSigned = true;
2968 else
2969 llvm_unreachable("Unknown suffix");
2970
2971 unsigned Imm;
2972 if (CI->arg_size() == 3) {
2973 Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2974 } else {
2975 Name = Name.substr(9); // strip off "xop.vpcom"
2976 if (Name.starts_with("lt"))
2977 Imm = 0;
2978 else if (Name.starts_with("le"))
2979 Imm = 1;
2980 else if (Name.starts_with("gt"))
2981 Imm = 2;
2982 else if (Name.starts_with("ge"))
2983 Imm = 3;
2984 else if (Name.starts_with("eq"))
2985 Imm = 4;
2986 else if (Name.starts_with("ne"))
2987 Imm = 5;
2988 else if (Name.starts_with("false"))
2989 Imm = 6;
2990 else if (Name.starts_with("true"))
2991 Imm = 7;
2992 else
2993 llvm_unreachable("Unknown condition");
2994 }
2995
2996 Rep = upgradeX86vpcom(Builder, *CI, Imm, IsSigned);
2997 } else if (IsX86 && Name.starts_with("xop.vpcmov")) {
2998 Value *Sel = CI->getArgOperand(2);
2999 Value *NotSel = Builder.CreateNot(Sel);
3000 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel);
3001 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel);
3002 Rep = Builder.CreateOr(Sel0, Sel1);
3003 } else if (IsX86 && (Name.starts_with("xop.vprot") ||
3004 Name.starts_with("avx512.prol") ||
3005 Name.starts_with("avx512.mask.prol"))) {
3006 Rep = upgradeX86Rotate(Builder, *CI, false);
3007 } else if (IsX86 && (Name.starts_with("avx512.pror") ||
3008 Name.starts_with("avx512.mask.pror"))) {
3009 Rep = upgradeX86Rotate(Builder, *CI, true);
3010 } else if (IsX86 && (Name.starts_with("avx512.vpshld.") ||
3011 Name.starts_with("avx512.mask.vpshld") ||
3012 Name.starts_with("avx512.maskz.vpshld"))) {
3013 bool ZeroMask = Name[11] == 'z';
3014 Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask);
3015 } else if (IsX86 && (Name.starts_with("avx512.vpshrd.") ||
3016 Name.starts_with("avx512.mask.vpshrd") ||
3017 Name.starts_with("avx512.maskz.vpshrd"))) {
3018 bool ZeroMask = Name[11] == 'z';
3019 Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
3020 } else if (IsX86 && Name == "sse42.crc32.64.8") {
3021 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
3022 Intrinsic::x86_sse42_crc32_32_8);
3023 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
3024 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
3025 Rep = Builder.CreateZExt(Rep, CI->getType(), "");
3026 } else if (IsX86 && (Name.starts_with("avx.vbroadcast.s") ||
3027 Name.starts_with("avx512.vbroadcast.s"))) {
3028 // Replace broadcasts with a series of insertelements.
3029 auto *VecTy = cast<FixedVectorType>(CI->getType());
3030 Type *EltTy = VecTy->getElementType();
3031 unsigned EltNum = VecTy->getNumElements();
3032 Value *Load = Builder.CreateLoad(EltTy, CI->getArgOperand(0));
3033 Type *I32Ty = Type::getInt32Ty(C);
3034 Rep = PoisonValue::get(VecTy);
3035 for (unsigned I = 0; I < EltNum; ++I)
3036 Rep = Builder.CreateInsertElement(Rep, Load,
3037 ConstantInt::get(I32Ty, I));
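// E.g. for a <4 x float> destination this emits one scalar load followed by
// four insertelement instructions, one per destination lane.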
3038 } else if (IsX86 && (Name.starts_with("sse41.pmovsx") ||
3039 Name.starts_with("sse41.pmovzx") ||
3040 Name.starts_with("avx2.pmovsx") ||
3041 Name.starts_with("avx2.pmovzx") ||
3042 Name.starts_with("avx512.mask.pmovsx") ||
3043 Name.starts_with("avx512.mask.pmovzx"))) {
3044 auto *DstTy = cast<FixedVectorType>(CI->getType());
3045 unsigned NumDstElts = DstTy->getNumElements();
3046
3047 // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
3048 SmallVector<int, 8> ShuffleMask(NumDstElts);
3049 for (unsigned i = 0; i != NumDstElts; ++i)
3050 ShuffleMask[i] = i;
3051
3052 Value *SV =
3053 Builder.CreateShuffleVector(CI->getArgOperand(0), ShuffleMask);
3054
3055 bool DoSext = Name.contains("pmovsx");
3056 Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
3057 : Builder.CreateZExt(SV, DstTy);
3058 // If there are 3 arguments, it's a masked intrinsic so we need a select.
3059 if (CI->arg_size() == 3)
3060 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3061 CI->getArgOperand(1));
3062 } else if (Name == "avx512.mask.pmov.qd.256" ||
3063 Name == "avx512.mask.pmov.qd.512" ||
3064 Name == "avx512.mask.pmov.wb.256" ||
3065 Name == "avx512.mask.pmov.wb.512") {
3066 Type *Ty = CI->getArgOperand(1)->getType();
3067 Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty);
3068 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3069 CI->getArgOperand(1));
3070 } else if (IsX86 && (Name.starts_with("avx.vbroadcastf128") ||
3071 Name == "avx2.vbroadcasti128")) {
3072 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
3073 Type *EltTy = cast<VectorType>(CI->getType())->getElementType();
3074 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
3075 auto *VT = FixedVectorType::get(EltTy, NumSrcElts);
3076 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
3077 PointerType::getUnqual(VT));
3078 Value *Load = Builder.CreateAlignedLoad(VT, Op, Align(1));
3079 if (NumSrcElts == 2)
3080 Rep = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 0, 1});
3081 else
3082 Rep = Builder.CreateShuffleVector(
3083 Load, ArrayRef<int>{0, 1, 2, 3, 0, 1, 2, 3});
3084 } else if (IsX86 && (Name.starts_with("avx512.mask.shuf.i") ||
3085 Name.starts_with("avx512.mask.shuf.f"))) {
3086 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3087 Type *VT = CI->getType();
3088 unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128;
3089 unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits();
3090 unsigned ControlBitsMask = NumLanes - 1;
3091 unsigned NumControlBits = NumLanes / 2;
3092 SmallVector<int, 8> ShuffleMask(0);
3093
3094 for (unsigned l = 0; l != NumLanes; ++l) {
3095 unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask;
3096 // We actually need the other source.
3097 if (l >= NumLanes / 2)
3098 LaneMask += NumLanes;
3099 for (unsigned i = 0; i != NumElementsInLane; ++i)
3100 ShuffleMask.push_back(LaneMask * NumElementsInLane + i);
3101 }
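// E.g. for a 512-bit shuffle with four 128-bit lanes, Imm = 0x4E selects
// lanes 2 and 3 of the first source followed by lanes 0 and 1 of the second.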
3102 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
3103 CI->getArgOperand(1), ShuffleMask);
3104 Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep,
3105 CI->getArgOperand(3));
3106 } else if (IsX86 && (Name.starts_with("avx512.mask.broadcastf") ||
3107 Name.starts_with("avx512.mask.broadcasti"))) {
3108 unsigned NumSrcElts =
3109 cast<FixedVectorType>(CI->getArgOperand(0)->getType())
3110 ->getNumElements();
3111 unsigned NumDstElts =
3112 cast<FixedVectorType>(CI->getType())->getNumElements();
3113
3114 SmallVector<int, 8> ShuffleMask(NumDstElts);
3115 for (unsigned i = 0; i != NumDstElts; ++i)
3116 ShuffleMask[i] = i % NumSrcElts;
3117
3118 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
3119 CI->getArgOperand(0),
3120 ShuffleMask);
3121 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3122 CI->getArgOperand(1));
3123 } else if (IsX86 && (Name.starts_with("avx2.pbroadcast") ||
3124 Name.starts_with("avx2.vbroadcast") ||
3125 Name.starts_with("avx512.pbroadcast") ||
3126 Name.starts_with("avx512.mask.broadcast.s"))) {
3127 // Replace vp?broadcasts with a vector shuffle.
3128 Value *Op = CI->getArgOperand(0);
3129 ElementCount EC = cast<VectorType>(CI->getType())->getElementCount();
3130 Type *MaskTy = VectorType::get(Type::getInt32Ty(C), EC);
3131 SmallVector<int, 8> M;
3132 ShuffleVectorInst::getShuffleMask(Constant::getNullValue(MaskTy), M);
3133 Rep = Builder.CreateShuffleVector(Op, M);
3134
3135 if (CI->arg_size() == 3)
3136 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3137 CI->getArgOperand(1));
3138 } else if (IsX86 && (Name.starts_with("sse2.padds.") ||
3139 Name.starts_with("avx2.padds.") ||
3140 Name.starts_with("avx512.padds.") ||
3141 Name.starts_with("avx512.mask.padds."))) {
3142 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::sadd_sat);
3143 } else if (IsX86 && (Name.starts_with("sse2.psubs.") ||
3144 Name.starts_with("avx2.psubs.") ||
3145 Name.starts_with("avx512.psubs.") ||
3146 Name.starts_with("avx512.mask.psubs."))) {
3147 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::ssub_sat);
3148 } else if (IsX86 && (Name.starts_with("sse2.paddus.") ||
3149 Name.starts_with("avx2.paddus.") ||
3150 Name.starts_with("avx512.mask.paddus."))) {
3151 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::uadd_sat);
3152 } else if (IsX86 && (Name.starts_with("sse2.psubus.") ||
3153 Name.starts_with("avx2.psubus.") ||
3154 Name.starts_with("avx512.mask.psubus."))) {
3155 Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::usub_sat);
3156 } else if (IsX86 && Name.starts_with("avx512.mask.palignr.")) {
3157 Rep = upgradeX86ALIGNIntrinsics(
3158 Builder, CI->getArgOperand(0), CI->getArgOperand(1),
3159 CI->getArgOperand(2), CI->getArgOperand(3), CI->getArgOperand(4),
3160 false);
3161 } else if (IsX86 && Name.starts_with("avx512.mask.valign.")) {
3162 Rep = upgradeX86ALIGNIntrinsics(
3163 Builder, CI->getArgOperand(0), CI->getArgOperand(1),
3164 CI->getArgOperand(2), CI->getArgOperand(3), CI->getArgOperand(4),
3165 true);
3166 } else if (IsX86 && (Name == "sse2.psll.dq" ||
3167 Name == "avx2.psll.dq")) {
3168 // 128/256-bit shift left specified in bits.
3169 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3170 Rep = upgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
3171 Shift / 8); // Shift is in bits.
3172 } else if (IsX86 && (Name == "sse2.psrl.dq" ||
3173 Name == "avx2.psrl.dq")) {
3174 // 128/256-bit shift right specified in bits.
3175 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3176 Rep = upgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
3177 Shift / 8); // Shift is in bits.
3178 } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
3179 Name == "avx2.psll.dq.bs" ||
3180 Name == "avx512.psll.dq.512")) {
3181 // 128/256/512-bit shift left specified in bytes.
3182 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3183 Rep = upgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
3184 } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
3185 Name == "avx2.psrl.dq.bs" ||
3186 Name == "avx512.psrl.dq.512")) {
3187 // 128/256/512-bit shift right specified in bytes.
3188 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3189 Rep = upgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
3190 } else if (IsX86 && (Name == "sse41.pblendw" ||
3191 Name.starts_with("sse41.blendp") ||
3192 Name.starts_with("avx.blend.p") ||
3193 Name == "avx2.pblendw" ||
3194 Name.starts_with("avx2.pblendd."))) {
3195 Value *Op0 = CI->getArgOperand(0);
3196 Value *Op1 = CI->getArgOperand(1);
3197 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3198 auto *VecTy = cast<FixedVectorType>(CI->getType());
3199 unsigned NumElts = VecTy->getNumElements();
3200
3201 SmallVector<int, 16> Idxs(NumElts);
3202 for (unsigned i = 0; i != NumElts; ++i)
3203 Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
3204
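// E.g. sse41.pblendw on <8 x i16> with Imm = 0x0F takes the low four
// elements from Op1 and the high four from Op0, i.e. the shuffle mask
// <8, 9, 10, 11, 4, 5, 6, 7>.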
3205 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3206 } else if (IsX86 && (Name.starts_with("avx.vinsertf128.") ||
3207 Name == "avx2.vinserti128" ||
3208 Name.starts_with("avx512.mask.insert"))) {
3209 Value *Op0 = CI->getArgOperand(0);
3210 Value *Op1 = CI->getArgOperand(1);
3211 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3212 unsigned DstNumElts =
3213 cast<FixedVectorType>(CI->getType())->getNumElements();
3214 unsigned SrcNumElts =
3215 cast<FixedVectorType>(Op1->getType())->getNumElements();
3216 unsigned Scale = DstNumElts / SrcNumElts;
3217
3218 // Mask off the high bits of the immediate value; hardware ignores those.
3219 Imm = Imm % Scale;
3220
3221 // Extend the second operand into a vector the size of the destination.
3222 SmallVector<int, 8> Idxs(DstNumElts);
3223 for (unsigned i = 0; i != SrcNumElts; ++i)
3224 Idxs[i] = i;
3225 for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
3226 Idxs[i] = SrcNumElts;
3227 Rep = Builder.CreateShuffleVector(Op1, Idxs);
3228
3229 // Insert the second operand into the first operand.
3230
3231 // Note that there is no guarantee that instruction lowering will actually
3232 // produce a vinsertf128 instruction for the created shuffles. In
3233 // particular, the 0 immediate case involves no lane changes, so it can
3234 // be handled as a blend.
3235
3236 // Example of shuffle mask for 32-bit elements:
3237 // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
3238 // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7 >
3239
3240 // First, fill with the identity mask.
3241 for (unsigned i = 0; i != DstNumElts; ++i)
3242 Idxs[i] = i;
3243 // Then replace the elements where we need to insert.
3244 for (unsigned i = 0; i != SrcNumElts; ++i)
3245 Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
3246 Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
3247
3248 // If the intrinsic has a mask operand, handle that.
3249 if (CI->arg_size() == 5)
3250 Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep,
3251 CI->getArgOperand(3));
3252 } else if (IsX86 && (Name.starts_with("avx.vextractf128.") ||
3253 Name == "avx2.vextracti128" ||
3254 Name.starts_with("avx512.mask.vextract"))) {
3255 Value *Op0 = CI->getArgOperand(0);
3256 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3257 unsigned DstNumElts =
3258 cast<FixedVectorType>(CI->getType())->getNumElements();
3259 unsigned SrcNumElts =
3260 cast<FixedVectorType>(Op0->getType())->getNumElements();
3261 unsigned Scale = SrcNumElts / DstNumElts;
3262
3263 // Mask off the high bits of the immediate value; hardware ignores those.
3264 Imm = Imm % Scale;
3265
3266 // Get indexes for the subvector of the input vector.
3267 SmallVector<int, 8> Idxs(DstNumElts);
3268 for (unsigned i = 0; i != DstNumElts; ++i) {
3269 Idxs[i] = i + (Imm * DstNumElts);
3270 }
3271 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3272
3273 // If the intrinsic has a mask operand, handle that.
3274 if (CI->arg_size() == 4)
3275 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3276 CI->getArgOperand(2));
3277 } else if (!IsX86 && Name == "stackprotectorcheck") {
3278 Rep = nullptr;
3279 } else if (IsX86 && (Name.starts_with("avx512.mask.perm.df.") ||
3280 Name.starts_with("avx512.mask.perm.di."))) {
3281 Value *Op0 = CI->getArgOperand(0);
3282 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3283 auto *VecTy = cast<FixedVectorType>(CI->getType());
3284 unsigned NumElts = VecTy->getNumElements();
3285
3286 SmallVector<int, 8> Idxs(NumElts);
3287 for (unsigned i = 0; i != NumElts; ++i)
3288 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3);
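// E.g. Imm = 0x1B reverses each group of four elements, giving the mask
// <3, 2, 1, 0> (and <7, 6, 5, 4> for the upper group of a 512-bit vector).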
3289
3290 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3291
3292 if (CI->arg_size() == 4)
3293 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3294 CI->getArgOperand(2));
3295 } else if (IsX86 && (Name.starts_with("avx.vperm2f128.") ||
3296 Name == "avx2.vperm2i128")) {
3297 // The immediate permute control byte looks like this:
3298 // [1:0] - select 128 bits from sources for low half of destination
3299 // [2] - ignore
3300 // [3] - zero low half of destination
3301 // [5:4] - select 128 bits from sources for high half of destination
3302 // [6] - ignore
3303 // [7] - zero high half of destination
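// E.g. Imm = 0x31 takes the high 128 bits of operand 0 for the low half of
// the result and the high 128 bits of operand 1 for the high half, while
// Imm = 0x08 zeroes the low half of the result.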
3304
3305 uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3306
3307 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3308 unsigned HalfSize = NumElts / 2;
3309 SmallVector<int, 8> ShuffleMask(NumElts);
3310
3311 // Determine which operand(s) are actually in use for this instruction.
3312 Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0);
3313 Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0);
3314
3315 // If needed, replace operands based on zero mask.
3316 V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0;
3317 V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1;
3318
3319 // Permute low half of result.
3320 unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0;
3321 for (unsigned i = 0; i < HalfSize; ++i)
3322 ShuffleMask[i] = StartIndex + i;
3323
3324 // Permute high half of result.
3325 StartIndex = (Imm & 0x10) ? HalfSize : 0;
3326 for (unsigned i = 0; i < HalfSize; ++i)
3327 ShuffleMask[i + HalfSize] = NumElts + StartIndex + i;
3328
3329 Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
3330
3331 } else if (IsX86 && (Name.starts_with("avx.vpermil.") ||
3332 Name == "sse2.pshuf.d" ||
3333 Name.starts_with("avx512.mask.vpermil.p") ||
3334 Name.starts_with("avx512.mask.pshuf.d."))) {
3335 Value *Op0 = CI->getArgOperand(0);
3336 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3337 auto *VecTy = cast<FixedVectorType>(CI->getType());
3338 unsigned NumElts = VecTy->getNumElements();
3339 // Calculate the size of each index in the immediate.
3340 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
3341 unsigned IdxMask = ((1 << IdxSize) - 1);
3342
3343 SmallVector<int, 8> Idxs(NumElts);
3344 // Look up the bits for this element, wrapping around the immediate every
3345 // 8 bits. Elements are grouped into sets of 2 or 4 elements, so we need
3346 // to offset by the first index of each group.
3347 for (unsigned i = 0; i != NumElts; ++i)
3348 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);
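// E.g. sse2.pshuf.d with Imm = 0x1B yields the mask <3, 2, 1, 0>; wider
// vectors repeat the same 8-bit pattern in each group of elements.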
3349
3350 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3351
3352 if (CI->arg_size() == 4)
3353 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3354 CI->getArgOperand(2));
3355 } else if (IsX86 && (Name == "sse2.pshufl.w" ||
3356 Name.starts_with("avx512.mask.pshufl.w."))) {
3357 Value *Op0 = CI->getArgOperand(0);
3358 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3359 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3360
3361 SmallVector<int, 16> Idxs(NumElts);
3362 for (unsigned l = 0; l != NumElts; l += 8) {
3363 for (unsigned i = 0; i != 4; ++i)
3364 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
3365 for (unsigned i = 4; i != 8; ++i)
3366 Idxs[i + l] = i + l;
3367 }
3368
3369 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3370
3371 if (CI->arg_size() == 4)
3372 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3373 CI->getArgOperand(2));
3374 } else if (IsX86 && (Name == "sse2.pshufh.w" ||
3375 Name.starts_with("avx512.mask.pshufh.w."))) {
3376 Value *Op0 = CI->getArgOperand(0);
3377 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
3378 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3379
3380 SmallVector<int, 16> Idxs(NumElts);
3381 for (unsigned l = 0; l != NumElts; l += 8) {
3382 for (unsigned i = 0; i != 4; ++i)
3383 Idxs[i + l] = i + l;
3384 for (unsigned i = 0; i != 4; ++i)
3385 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
3386 }
3387
3388 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3389
3390 if (CI->arg_size() == 4)
3391 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3392 CI->getArgOperand(2));
3393 } else if (IsX86 && Name.starts_with("avx512.mask.shuf.p")) {
3394 Value *Op0 = CI->getArgOperand(0);
3395 Value *Op1 = CI->getArgOperand(1);
3396 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
3397 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3398
3399 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3400 unsigned HalfLaneElts = NumLaneElts / 2;
3401
3402 SmallVector<int, 16> Idxs(NumElts);
3403 for (unsigned i = 0; i != NumElts; ++i) {
3404 // Base index is the starting element of the lane.
3405 Idxs[i] = i - (i % NumLaneElts);
3406 // If we are halfway through the lane, switch to the other source.
3407 if ((i % NumLaneElts) >= HalfLaneElts)
3408 Idxs[i] += NumElts;
3409 // Now select the specific element by adding HalfLaneElts bits from
3410 // the immediate, wrapping around the immediate every 8 bits.
3411 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1);
3412 }
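// E.g. a <4 x float> shufps with Imm = 0x4E produces the mask <2, 3, 4, 5>:
// the low half comes from Op0 and the high half from Op1.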
3413
3414 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3415
3416 Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep,
3417 CI->getArgOperand(3));
3418 } else if (IsX86 && (Name.starts_with("avx512.mask.movddup") ||
3419 Name.starts_with("avx512.mask.movshdup") ||
3420 Name.starts_with("avx512.mask.movsldup"))) {
3421 Value *Op0 = CI->getArgOperand(0);
3422 unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3423 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3424
3425 unsigned Offset = 0;
3426 if (Name.starts_with("avx512.mask.movshdup."))
3427 Offset = 1;
3428
3429 SmallVector<int, 16> Idxs(NumElts);
3430 for (unsigned l = 0; l != NumElts; l += NumLaneElts)
3431 for (unsigned i = 0; i != NumLaneElts; i += 2) {
3432 Idxs[i + l + 0] = i + l + Offset;
3433 Idxs[i + l + 1] = i + l + Offset;
3434 }
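// E.g. on <4 x float>, movsldup produces the mask <0, 0, 2, 2> and movshdup
// produces <1, 1, 3, 3>.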
3435
3436 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
3437
3438 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3439 CI->getArgOperand(1));
3440 } else if (IsX86 && (Name.starts_with("avx512.mask.punpckl") ||
3441 Name.starts_with("avx512.mask.unpckl."))) {
3442 Value *Op0 = CI->getArgOperand(0);
3443 Value *Op1 = CI->getArgOperand(1);
3444 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3445 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3446
3447 SmallVector<int, 64> Idxs(NumElts);
3448 for (int l = 0; l != NumElts; l += NumLaneElts)
3449 for (int i = 0; i != NumLaneElts; ++i)
3450 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2);
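// E.g. for <4 x i32> this builds the mask <0, 4, 1, 5>, interleaving the low
// halves of the two sources.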
3451
3452 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3453
3454 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3455 CI->getArgOperand(2));
3456 } else if (IsX86 && (Name.starts_with("avx512.mask.punpckh") ||
3457 Name.starts_with("avx512.mask.unpckh."))) {
3458 Value *Op0 = CI->getArgOperand(0);
3459 Value *Op1 = CI->getArgOperand(1);
3460 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
3461 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
3462
3463 SmallVector<int, 64> Idxs(NumElts);
3464 for (int l = 0; l != NumElts; l += NumLaneElts)
3465 for (int i = 0; i != NumLaneElts; ++i)
3466 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2);
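// E.g. for <4 x i32> this builds the mask <2, 6, 3, 7>, interleaving the high
// halves of the two sources.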
3467
3468 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
3469
3470 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3471 CI->getArgOperand(2));
3472 } else if (IsX86 && (Name.starts_with("avx512.mask.and.") ||
3473 Name.starts_with("avx512.mask.pand."))) {
3474 VectorType *FTy = cast<VectorType>(CI->getType());
3475 VectorType *ITy = VectorType::getInteger(FTy);
3476 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3477 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3478 Rep = Builder.CreateBitCast(Rep, FTy);
3479 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3480 CI->getArgOperand(2));
3481 } else if (IsX86 && (Name.starts_with("avx512.mask.andn.") ||
3482 Name.starts_with("avx512.mask.pandn."))) {
3483 VectorType *FTy = cast<VectorType>(CI->getType());
3484 VectorType *ITy = VectorType::getInteger(FTy);
3485 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
3486 Rep = Builder.CreateAnd(Rep,
3487 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3488 Rep = Builder.CreateBitCast(Rep, FTy);
3489 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3490 CI->getArgOperand(2));
3491 } else if (IsX86 && (Name.starts_with("avx512.mask.or.") ||
3492 Name.starts_with("avx512.mask.por."))) {
3493 VectorType *FTy = cast<VectorType>(CI->getType());
3494 VectorType *ITy = VectorType::getInteger(FTy);
3495 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3496 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3497 Rep = Builder.CreateBitCast(Rep, FTy);
3498 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3499 CI->getArgOperand(2));
3500 } else if (IsX86 && (Name.starts_with("avx512.mask.xor.") ||
3501 Name.starts_with("avx512.mask.pxor."))) {
3502 VectorType *FTy = cast<VectorType>(CI->getType());
3503 VectorType *ITy = VectorType::getInteger(FTy);
3504 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
3505 Builder.CreateBitCast(CI->getArgOperand(1), ITy));
3506 Rep = Builder.CreateBitCast(Rep, FTy);
3507 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3508 CI->getArgOperand(2));
3509 } else if (IsX86 && Name.starts_with("avx512.mask.padd.")) {
3510 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
3511 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3512 CI->getArgOperand(2));
3513 } else if (IsX86 && Name.starts_with("avx512.mask.psub.")) {
3514 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1));
3515 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3516 CI->getArgOperand(2));
3517 } else if (IsX86 && Name.starts_with("avx512.mask.pmull.")) {
3518 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1));
3519 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3520 CI->getArgOperand(2));
3521 } else if (IsX86 && Name.starts_with("avx512.mask.add.p")) {
3522 if (Name.ends_with(".512")) {
3523 Intrinsic::ID IID;
3524 if (Name[17] == 's')
3525 IID = Intrinsic::x86_avx512_add_ps_512;
3526 else
3527 IID = Intrinsic::x86_avx512_add_pd_512;
3528
3529 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3530 { CI->getArgOperand(0), CI->getArgOperand(1),
3531 CI->getArgOperand(4) });
3532 } else {
3533 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
3534 }
3535 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3536 CI->getArgOperand(2));
3537 } else if (IsX86 && Name.starts_with("avx512.mask.div.p")) {
3538 if (Name.ends_with(".512")) {
3539 Intrinsic::ID IID;
3540 if (Name[17] == 's')
3541 IID = Intrinsic::x86_avx512_div_ps_512;
3542 else
3543 IID = Intrinsic::x86_avx512_div_pd_512;
3544
3545 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3546 { CI->getArgOperand(0), CI->getArgOperand(1),
3547 CI->getArgOperand(4) });
3548 } else {
3549 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
3550 }
3551 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3552 CI->getArgOperand(2));
3553 } else if (IsX86 && Name.starts_with("avx512.mask.mul.p")) {
3554 if (Name.ends_with(".512")) {
3555 Intrinsic::ID IID;
3556 if (Name[17] == 's')
3557 IID = Intrinsic::x86_avx512_mul_ps_512;
3558 else
3559 IID = Intrinsic::x86_avx512_mul_pd_512;
3560
3561 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3562 { CI->getArgOperand(0), CI->getArgOperand(1),
3563 CI->getArgOperand(4) });
3564 } else {
3565 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
3566 }
3567 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3568 CI->getArgOperand(2));
3569 } else if (IsX86 && Name.starts_with("avx512.mask.sub.p")) {
3570 if (Name.ends_with(".512")) {
3571 Intrinsic::ID IID;
3572 if (Name[17] == 's')
3573 IID = Intrinsic::x86_avx512_sub_ps_512;
3574 else
3575 IID = Intrinsic::x86_avx512_sub_pd_512;
3576
3577 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3578 { CI->getArgOperand(0), CI->getArgOperand(1),
3579 CI->getArgOperand(4) });
3580 } else {
3581 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
3582 }
3583 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3584 CI->getArgOperand(2));
3585 } else if (IsX86 && (Name.starts_with("avx512.mask.max.p") ||
3586 Name.starts_with("avx512.mask.min.p")) &&
3587 Name.drop_front(18) == ".512") {
3588 bool IsDouble = Name[17] == 'd';
3589 bool IsMin = Name[13] == 'i';
3590 static const Intrinsic::ID MinMaxTbl[2][2] = {
3591 { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 },
3592 { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 }
3593 };
3594 Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble];
3595
3596 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3597 { CI->getArgOperand(0), CI->getArgOperand(1),
3598 CI->getArgOperand(4) });
3599 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
3600 CI->getArgOperand(2));
3601 } else if (IsX86 && Name.starts_with("avx512.mask.lzcnt.")) {
3602 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
3603 Intrinsic::ctlz,
3604 CI->getType()),
3605 { CI->getArgOperand(0), Builder.getInt1(false) });
3606 Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
3607 CI->getArgOperand(1));
3608 } else if (IsX86 && Name.starts_with("avx512.mask.psll")) {
3609 bool IsImmediate = Name[16] == 'i' ||
3610 (Name.size() > 18 && Name[18] == 'i');
3611 bool IsVariable = Name[16] == 'v';
3612 char Size = Name[16] == '.' ? Name[17] :
3613 Name[17] == '.' ? Name[18] :
3614 Name[18] == '.' ? Name[19] :
3615 Name[20];
3616
3617 Intrinsic::ID IID;
3618 if (IsVariable && Name[17] != '.') {
3619 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di
3620 IID = Intrinsic::x86_avx2_psllv_q;
3621 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di
3622 IID = Intrinsic::x86_avx2_psllv_q_256;
3623 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si
3624 IID = Intrinsic::x86_avx2_psllv_d;
3625 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si
3626 IID = Intrinsic::x86_avx2_psllv_d_256;
3627 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi
3628 IID = Intrinsic::x86_avx512_psllv_w_128;
3629 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi
3630 IID = Intrinsic::x86_avx512_psllv_w_256;
3631 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi
3632 IID = Intrinsic::x86_avx512_psllv_w_512;
3633 else
3634 llvm_unreachable("Unexpected size");
3635 } else if (Name.ends_with(".128")) {
3636 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128
3637 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d
3638 : Intrinsic::x86_sse2_psll_d;
3639 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128
3640 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q
3641 : Intrinsic::x86_sse2_psll_q;
3642 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128
3643 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w
3644 : Intrinsic::x86_sse2_psll_w;
3645 else
3646 llvm_unreachable("Unexpected size");
3647 } else if (Name.ends_with(".256")) {
3648 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256
3649 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d
3650 : Intrinsic::x86_avx2_psll_d;
3651 else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256
3652 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q
3653 : Intrinsic::x86_avx2_psll_q;
3654 else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256
3655 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w
3656 : Intrinsic::x86_avx2_psll_w;
3657 else
3658 llvm_unreachable("Unexpected size");
3659 } else {
3660 if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512
3661 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 :
3662 IsVariable ? Intrinsic::x86_avx512_psllv_d_512 :
3663 Intrinsic::x86_avx512_psll_d_512;
3664 else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512
3665 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 :
3666 IsVariable ? Intrinsic::x86_avx512_psllv_q_512 :
3667 Intrinsic::x86_avx512_psll_q_512;
3668 else if (Size == 'w') // psll.wi.512, pslli.w, psll.w
3669 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512
3670 : Intrinsic::x86_avx512_psll_w_512;
3671 else
3672 llvm_unreachable("Unexpected size");
3673 }
3674
3675 Rep = upgradeX86MaskedShift(Builder, *CI, IID);
3676 } else if (IsX86 && Name.starts_with("avx512.mask.psrl")) {
3677 bool IsImmediate = Name[16] == 'i' ||
3678 (Name.size() > 18 && Name[18] == 'i');
3679 bool IsVariable = Name[16] == 'v';
3680 char Size = Name[16] == '.' ? Name[17] :
3681 Name[17] == '.' ? Name[18] :
3682 Name[18] == '.' ? Name[19] :
3683 Name[20];
3684
3685 Intrinsic::ID IID;
3686 if (IsVariable && Name[17] != '.') {
3687 if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di
3688 IID = Intrinsic::x86_avx2_psrlv_q;
3689 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di
3690 IID = Intrinsic::x86_avx2_psrlv_q_256;
3691 else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si
3692 IID = Intrinsic::x86_avx2_psrlv_d;
3693 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si
3694 IID = Intrinsic::x86_avx2_psrlv_d_256;
3695 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi
3696 IID = Intrinsic::x86_avx512_psrlv_w_128;
3697 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi
3698 IID = Intrinsic::x86_avx512_psrlv_w_256;
3699 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi
3700 IID = Intrinsic::x86_avx512_psrlv_w_512;
3701 else
3702 llvm_unreachable("Unexpected size");
3703 } else if (Name.ends_with(".128")) {
3704 if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128
3705 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d
3706 : Intrinsic::x86_sse2_psrl_d;
3707 else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128
3708 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q
3709 : Intrinsic::x86_sse2_psrl_q;
3710 else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128
3711 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w
3712 : Intrinsic::x86_sse2_psrl_w;
3713 else
3714 llvm_unreachable("Unexpected size");
3715 } else if (Name.ends_with(".256")) {
3716 if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256
3717 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d
3718 : Intrinsic::x86_avx2_psrl_d;
3719 else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256
3720 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q
3721 : Intrinsic::x86_avx2_psrl_q;
3722 else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256
3723 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w
3724 : Intrinsic::x86_avx2_psrl_w;
3725 else
3726 llvm_unreachable("Unexpected size");
3727 } else {
3728 if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512
3729 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 :
3730 IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 :
3731 Intrinsic::x86_avx512_psrl_d_512;
3732 else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512
3733 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 :
3734 IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 :
3735 Intrinsic::x86_avx512_psrl_q_512;
3736 else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w
3737 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512
3738 : Intrinsic::x86_avx512_psrl_w_512;
3739 else
3740 llvm_unreachable("Unexpected size");
3741 }
3742
3743 Rep = upgradeX86MaskedShift(Builder, *CI, IID);
3744 } else if (IsX86 && Name.starts_with("avx512.mask.psra")) {
3745 bool IsImmediate = Name[16] == 'i' ||
3746 (Name.size() > 18 && Name[18] == 'i');
3747 bool IsVariable = Name[16] == 'v';
3748 char Size = Name[16] == '.' ? Name[17] :
3749 Name[17] == '.' ? Name[18] :
3750 Name[18] == '.' ? Name[19] :
3751 Name[20];
3752
3753 Intrinsic::ID IID;
3754 if (IsVariable && Name[17] != '.') {
3755 if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si
3756 IID = Intrinsic::x86_avx2_psrav_d;
3757 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si
3758 IID = Intrinsic::x86_avx2_psrav_d_256;
3759 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi
3760 IID = Intrinsic::x86_avx512_psrav_w_128;
3761 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi
3762 IID = Intrinsic::x86_avx512_psrav_w_256;
3763 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi
3764 IID = Intrinsic::x86_avx512_psrav_w_512;
3765 else
3766 llvm_unreachable("Unexpected size");
3767 } else if (Name.ends_with(".128")) {
3768 if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128
3769 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d
3770 : Intrinsic::x86_sse2_psra_d;
3771 else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128
3772 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 :
3773 IsVariable ? Intrinsic::x86_avx512_psrav_q_128 :
3774 Intrinsic::x86_avx512_psra_q_128;
3775 else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128
3776 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w
3777 : Intrinsic::x86_sse2_psra_w;
3778 else
3779 llvm_unreachable("Unexpected size");
3780 } else if (Name.ends_with(".256")) {
3781 if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256
3782 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d
3783 : Intrinsic::x86_avx2_psra_d;
3784 else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256
3785 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 :
3786 IsVariable ? Intrinsic::x86_avx512_psrav_q_256 :
3787 Intrinsic::x86_avx512_psra_q_256;
3788 else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256
3789 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w
3790 : Intrinsic::x86_avx2_psra_w;
3791 else
3792 llvm_unreachable("Unexpected size");
3793 } else {
3794 if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512
3795 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 :
3796 IsVariable ? Intrinsic::x86_avx512_psrav_d_512 :
3797 Intrinsic::x86_avx512_psra_d_512;
3798 else if (Size == 'q') // psra.qi.512, psrai.q, psra.q
3799 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 :
3800 IsVariable ? Intrinsic::x86_avx512_psrav_q_512 :
3801 Intrinsic::x86_avx512_psra_q_512;
3802 else if (Size == 'w') // psra.wi.512, psrai.w, psra.w
3803 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512
3804 : Intrinsic::x86_avx512_psra_w_512;
3805 else
3806 llvm_unreachable("Unexpected size");
3807 }
3808
3809 Rep = upgradeX86MaskedShift(Builder, *CI, IID);
3810 } else if (IsX86 && Name.starts_with("avx512.mask.move.s")) {
3811 Rep = upgradeMaskedMove(Builder, *CI);
3812 } else if (IsX86 && Name.starts_with("avx512.cvtmask2")) {
3813 Rep = upgradeMaskToInt(Builder, *CI);
3814 } else if (IsX86 && Name.ends_with(".movntdqa")) {
3815 MDNode *Node = MDNode::get(
3816 C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
3817
3818 Value *Ptr = CI->getArgOperand(0);
3819
3820 // Convert the type of the pointer to a pointer to the stored type.
3821 Value *BC = Builder.CreateBitCast(
3822 Ptr, PointerType::getUnqual(CI->getType()), "cast");
3823 LoadInst *LI = Builder.CreateAlignedLoad(
3824 CI->getType(), BC,
3825 Align(CI->getType()->getPrimitiveSizeInBits().getFixedValue() / 8));
3826 LI->setMetadata(LLVMContext::MD_nontemporal, Node);
3827 Rep = LI;
3828 } else if (IsX86 && (Name.starts_with("fma.vfmadd.") ||
3829 Name.starts_with("fma.vfmsub.") ||
3830 Name.starts_with("fma.vfnmadd.") ||
3831 Name.starts_with("fma.vfnmsub."))) {
3832 bool NegMul = Name[6] == 'n';
3833 bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's';
3834 bool IsScalar = NegMul ? Name[12] == 's' : Name[11] == 's';
3835
3836 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3837 CI->getArgOperand(2) };
3838
3839 if (IsScalar) {
3840 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
3841 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
3842 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
3843 }
3844
3845 if (NegMul && !IsScalar)
3846 Ops[0] = Builder.CreateFNeg(Ops[0]);
3847 if (NegMul && IsScalar)
3848 Ops[1] = Builder.CreateFNeg(Ops[1]);
3849 if (NegAcc)
3850 Ops[2] = Builder.CreateFNeg(Ops[2]);
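// Taken together, e.g. the packed fma.vfnmsub.* forms lower to
// fma(-a, b, -c), i.e. -(a * b) - c.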
3851
3852 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
3853 Intrinsic::fma,
3854 Ops[0]->getType()),
3855 Ops);
3856
3857 if (IsScalar)
3858 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep,
3859 (uint64_t)0);
3860 } else if (IsX86 && Name.starts_with("fma4.vfmadd.s")) {
3861 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3862 CI->getArgOperand(2) };
3863
3864 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
3865 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
3866 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
3867
3868 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
3869 Intrinsic::fma,
3870 Ops[0]->getType()),
3871 Ops);
3872
3873 Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()),
3874 Rep, (uint64_t)0);
3875 } else if (IsX86 && (Name.starts_with("avx512.mask.vfmadd.s") ||
3876 Name.starts_with("avx512.maskz.vfmadd.s") ||
3877 Name.starts_with("avx512.mask3.vfmadd.s") ||
3878 Name.starts_with("avx512.mask3.vfmsub.s") ||
3879 Name.starts_with("avx512.mask3.vfnmsub.s"))) {
3880 bool IsMask3 = Name[11] == '3';
3881 bool IsMaskZ = Name[11] == 'z';
3882 // Drop the "avx512.mask." to make it easier.
3883 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3884 bool NegMul = Name[2] == 'n';
3885 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
3886
3887 Value *A = CI->getArgOperand(0);
3888 Value *B = CI->getArgOperand(1);
3889 Value *C = CI->getArgOperand(2);
3890
3891 if (NegMul && (IsMask3 || IsMaskZ))
3892 A = Builder.CreateFNeg(A);
3893 if (NegMul && !(IsMask3 || IsMaskZ))
3894 B = Builder.CreateFNeg(B);
3895 if (NegAcc)
3896 C = Builder.CreateFNeg(C);
3897
3898 A = Builder.CreateExtractElement(A, (uint64_t)0);
3899 B = Builder.CreateExtractElement(B, (uint64_t)0);
3900 C = Builder.CreateExtractElement(C, (uint64_t)0);
3901
3902 if (!isa<ConstantInt>(CI->getArgOperand(4)) ||
3903 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) {
3904 Value *Ops[] = { A, B, C, CI->getArgOperand(4) };
3905
3906 Intrinsic::ID IID;
3907 if (Name.back() == 'd')
3908 IID = Intrinsic::x86_avx512_vfmadd_f64;
3909 else
3910 IID = Intrinsic::x86_avx512_vfmadd_f32;
3911 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID);
3912 Rep = Builder.CreateCall(FMA, Ops);
3913 } else {
3914 Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
3915 Intrinsic::fma,
3916 A->getType());
3917 Rep = Builder.CreateCall(FMA, { A, B, C });
3918 }
3919
3920 Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType()) :
3921 IsMask3 ? C : A;
3922
3923 // For Mask3 with NegAcc, we need to create a new extractelement that
3924 // avoids the negation above.
3925 if (NegAcc && IsMask3)
3926 PassThru = Builder.CreateExtractElement(CI->getArgOperand(2),
3927 (uint64_t)0);
3928
3929 Rep = emitX86ScalarSelect(Builder, CI->getArgOperand(3), Rep, PassThru);
3930 Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0),
3931 Rep, (uint64_t)0);
3932 } else if (IsX86 && (Name.starts_with("avx512.mask.vfmadd.p") ||
3933 Name.starts_with("avx512.mask.vfnmadd.p") ||
3934 Name.starts_with("avx512.mask.vfnmsub.p") ||
3935 Name.starts_with("avx512.mask3.vfmadd.p") ||
3936 Name.starts_with("avx512.mask3.vfmsub.p") ||
3937 Name.starts_with("avx512.mask3.vfnmsub.p") ||
3938 Name.starts_with("avx512.maskz.vfmadd.p"))) {
3939 bool IsMask3 = Name[11] == '3';
3940 bool IsMaskZ = Name[11] == 'z';
3941 // Drop the "avx512.mask." to make it easier.
3942 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
3943 bool NegMul = Name[2] == 'n';
3944 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
3945
3946 Value *A = CI->getArgOperand(0);
3947 Value *B = CI->getArgOperand(1);
3948 Value *C = CI->getArgOperand(2);
3949
3950 if (NegMul && (IsMask3 || IsMaskZ))
3951 A = Builder.CreateFNeg(A);
3952 if (NegMul && !(IsMask3 || IsMaskZ))
3953 B = Builder.CreateFNeg(B);
3954 if (NegAcc)
3955 C = Builder.CreateFNeg(C);
3956
3957 if (CI->arg_size() == 5 &&
3958 (!isa<ConstantInt>(CI->getArgOperand(4)) ||
3959 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) {
3960 Intrinsic::ID IID;
3961 // Check the character before ".512" in string.
3962 if (Name[Name.size()-5] == 's')
3963 IID = Intrinsic::x86_avx512_vfmadd_ps_512;
3964 else
3965 IID = Intrinsic::x86_avx512_vfmadd_pd_512;
3966
3967 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
3968 { A, B, C, CI->getArgOperand(4) });
3969 } else {
3970 Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
3971 Intrinsic::fma,
3972 A->getType());
3973 Rep = Builder.CreateCall(FMA, { A, B, C });
3974 }
3975
3976 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
3977 IsMask3 ? CI->getArgOperand(2) :
3978 CI->getArgOperand(0);
3979
3980 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
3981 } else if (IsX86 && Name.starts_with("fma.vfmsubadd.p")) {
3982 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
3983 unsigned EltWidth = CI->getType()->getScalarSizeInBits();
3984 Intrinsic::ID IID;
3985 if (VecWidth == 128 && EltWidth == 32)
3986 IID = Intrinsic::x86_fma_vfmaddsub_ps;
3987 else if (VecWidth == 256 && EltWidth == 32)
3988 IID = Intrinsic::x86_fma_vfmaddsub_ps_256;
3989 else if (VecWidth == 128 && EltWidth == 64)
3990 IID = Intrinsic::x86_fma_vfmaddsub_pd;
3991 else if (VecWidth == 256 && EltWidth == 64)
3992 IID = Intrinsic::x86_fma_vfmaddsub_pd_256;
3993 else
3994 llvm_unreachable("Unexpected intrinsic");
3995
3996 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
3997 CI->getArgOperand(2) };
3998 Ops[2] = Builder.CreateFNeg(Ops[2]);
3999 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
4000 Ops);
4001 } else if (IsX86 && (Name.starts_with("avx512.mask.vfmaddsub.p") ||
4002 Name.starts_with("avx512.mask3.vfmaddsub.p") ||
4003 Name.starts_with("avx512.maskz.vfmaddsub.p") ||
4004 Name.starts_with("avx512.mask3.vfmsubadd.p"))) {
4005 bool IsMask3 = Name[11] == '3';
4006 bool IsMaskZ = Name[11] == 'z';
4007 // Drop the "avx512.mask." to make it easier.
4008 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
4009 bool IsSubAdd = Name[3] == 's';
4010 if (CI->arg_size() == 5) {
4011 Intrinsic::ID IID;
4012 // Check the character before ".512" in string.
4013 if (Name[Name.size()-5] == 's')
4014 IID = Intrinsic::x86_avx512_vfmaddsub_ps_512;
4015 else
4016 IID = Intrinsic::x86_avx512_vfmaddsub_pd_512;
4017
4018 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4019 CI->getArgOperand(2), CI->getArgOperand(4) };
4020 if (IsSubAdd)
4021 Ops[2] = Builder.CreateFNeg(Ops[2]);
4022
4023 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
4024 Ops);
4025 } else {
4026 int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
4027
4028 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4029 CI->getArgOperand(2) };
4030
4031 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
4032 Ops[0]->getType());
4033 Value *Odd = Builder.CreateCall(FMA, Ops);
4034 Ops[2] = Builder.CreateFNeg(Ops[2]);
4035 Value *Even = Builder.CreateCall(FMA, Ops);
4036
4037 if (IsSubAdd)
4038 std::swap(Even, Odd);
4039
4040 SmallVector<int, 32> Idxs(NumElts);
4041 for (int i = 0; i != NumElts; ++i)
4042 Idxs[i] = i + (i % 2) * NumElts;
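// E.g. for four elements the mask is <0, 5, 2, 7>: even result elements come
// from Even and odd elements from Odd, producing the alternating
// add/subtract interleave these intrinsics require.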
4043
4044 Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
4045 }
4046
4047 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
4048 IsMask3 ? CI->getArgOperand(2) :
4049 CI->getArgOperand(0);
4050
4051 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
4052 } else if (IsX86 && (Name.starts_with("avx512.mask.pternlog.") ||
4053 Name.starts_with("avx512.maskz.pternlog."))) {
4054 bool ZeroMask = Name[11] == 'z';
4055 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
4056 unsigned EltWidth = CI->getType()->getScalarSizeInBits();
4057 Intrinsic::ID IID;
4058 if (VecWidth == 128 && EltWidth == 32)
4059 IID = Intrinsic::x86_avx512_pternlog_d_128;
4060 else if (VecWidth == 256 && EltWidth == 32)
4061 IID = Intrinsic::x86_avx512_pternlog_d_256;
4062 else if (VecWidth == 512 && EltWidth == 32)
4063 IID = Intrinsic::x86_avx512_pternlog_d_512;
4064 else if (VecWidth == 128 && EltWidth == 64)
4065 IID = Intrinsic::x86_avx512_pternlog_q_128;
4066 else if (VecWidth == 256 && EltWidth == 64)
4067 IID = Intrinsic::x86_avx512_pternlog_q_256;
4068 else if (VecWidth == 512 && EltWidth == 64)
4069 IID = Intrinsic::x86_avx512_pternlog_q_512;
4070 else
4071 llvm_unreachable("Unexpected intrinsic");
4072
4073 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4074 CI->getArgOperand(2), CI->getArgOperand(3) };
4075 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
4076 Args);
4077 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
4078 : CI->getArgOperand(0);
4079 Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
4080 } else if (IsX86 && (Name.starts_with("avx512.mask.vpmadd52") ||
4081 Name.starts_with("avx512.maskz.vpmadd52"))) {
4082 bool ZeroMask = Name[11] == 'z';
4083 bool High = Name[20] == 'h' || Name[21] == 'h';
4084 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
4085 Intrinsic::ID IID;
4086 if (VecWidth == 128 && !High)
4087 IID = Intrinsic::x86_avx512_vpmadd52l_uq_128;
4088 else if (VecWidth == 256 && !High)
4089 IID = Intrinsic::x86_avx512_vpmadd52l_uq_256;
4090 else if (VecWidth == 512 && !High)
4091 IID = Intrinsic::x86_avx512_vpmadd52l_uq_512;
4092 else if (VecWidth == 128 && High)
4093 IID = Intrinsic::x86_avx512_vpmadd52h_uq_128;
4094 else if (VecWidth == 256 && High)
4095 IID = Intrinsic::x86_avx512_vpmadd52h_uq_256;
4096 else if (VecWidth == 512 && High)
4097 IID = Intrinsic::x86_avx512_vpmadd52h_uq_512;
4098 else
4099 llvm_unreachable("Unexpected intrinsic");
4100
4101 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4102 CI->getArgOperand(2) };
4103 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
4104 Args);
4105 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
4106 : CI->getArgOperand(0);
4107 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
4108 } else if (IsX86 && (Name.starts_with("avx512.mask.vpermi2var.") ||
4109 Name.starts_with("avx512.mask.vpermt2var.") ||
4110 Name.starts_with("avx512.maskz.vpermt2var."))) {
4111 bool ZeroMask = Name[11] == 'z';
4112 bool IndexForm = Name[17] == 'i';
4113 Rep = upgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm);
4114 } else if (IsX86 && (Name.starts_with("avx512.mask.vpdpbusd.") ||
4115 Name.starts_with("avx512.maskz.vpdpbusd.") ||
4116 Name.starts_with("avx512.mask.vpdpbusds.") ||
4117 Name.starts_with("avx512.maskz.vpdpbusds."))) {
4118 bool ZeroMask = Name[11] == 'z';
4119 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
4120 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
4121 Intrinsic::ID IID;
4122 if (VecWidth == 128 && !IsSaturating)
4123 IID = Intrinsic::x86_avx512_vpdpbusd_128;
4124 else if (VecWidth == 256 && !IsSaturating)
4125 IID = Intrinsic::x86_avx512_vpdpbusd_256;
4126 else if (VecWidth == 512 && !IsSaturating)
4127 IID = Intrinsic::x86_avx512_vpdpbusd_512;
4128 else if (VecWidth == 128 && IsSaturating)
4129 IID = Intrinsic::x86_avx512_vpdpbusds_128;
4130 else if (VecWidth == 256 && IsSaturating)
4131 IID = Intrinsic::x86_avx512_vpdpbusds_256;
4132 else if (VecWidth == 512 && IsSaturating)
4133 IID = Intrinsic::x86_avx512_vpdpbusds_512;
4134 else
4135 llvm_unreachable("Unexpected intrinsic");
4136
4137 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4138 CI->getArgOperand(2) };
4139 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
4140 Args);
4141 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
4142 : CI->getArgOperand(0);
4143 Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
4144 } else if (IsX86 && (Name.starts_with("avx512.mask.vpdpwssd.") ||
4145 Name.starts_with("avx512.maskz.vpdpwssd.") ||
4146 Name.starts_with("avx512.mask.vpdpwssds.") ||
4147 Name.starts_with("avx512.maskz.vpdpwssds."))) {
4148 bool ZeroMask = Name[11] == 'z';
4149 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
4150 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
4151 Intrinsic::ID IID;
4152 if (VecWidth == 128 && !IsSaturating)
4153 IID = Intrinsic::x86_avx512_vpdpwssd_128;
4154 else if (VecWidth == 256 && !IsSaturating)
4155 IID = Intrinsic::x86_avx512_vpdpwssd_256;
4156 else if (VecWidth == 512 && !IsSaturating)
4157 IID = Intrinsic::x86_avx512_vpdpwssd_512;
4158 else if (VecWidth == 128 && IsSaturating)
4159 IID = Intrinsic::x86_avx512_vpdpwssds_128;
4160 else if (VecWidth == 256 && IsSaturating)
4161 IID = Intrinsic::x86_avx512_vpdpwssds_256;
4162 else if (VecWidth == 512 && IsSaturating)
4163 IID = Intrinsic::x86_avx512_vpdpwssds_512;
4164 else
4165 llvm_unreachable("Unexpected intrinsic");
4166
4167 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
4168 CI->getArgOperand(2) };
4169 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
4170 Args);
4171 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
4172 : CI->getArgOperand(0);