; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -codegenprepare -S < %s | FileCheck %s
; RUN: opt -enable-debugify -codegenprepare -S < %s 2>&1 | FileCheck %s -check-prefix=DEBUG

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"
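
; These tests check that CodeGenPrepare recognizes unsigned add/sub plus
; overflow-compare idioms and, for legal integer types, rewrites them to the
; overflow intrinsics. Roughly (a sketch of the rewrite being checked, not
; exact pass output):
;
;   %add = add i64 %a, %b
;   %ov  = icmp ult i64 %add, %a
; -->
;   %t   = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
;   %add = extractvalue { i64, i1 } %t, 0
;   %ov  = extractvalue { i64, i1 } %t, 1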
define i64 @uaddo1(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo1(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT: ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %a
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

define i64 @uaddo2(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo2(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT: ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

define i64 @uaddo3(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo3(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT: ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

; TODO? CGP sinks the compare before we have a chance to form the overflow intrinsic.
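; In @uaddo4 and @uaddo5 below, the compare is sunk into its user's block (the
; CHECK lines show the icmp ending up in %next), so the add and compare no
; longer sit together and no intrinsic is formed.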
define i64 @uaddo4(i64 %a, i64 %b, i1 %c) nounwind ssp {
; CHECK-LABEL: @uaddo4(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT: br i1 [[C:%.*]], label [[NEXT:%.*]], label [[EXIT:%.*]]
; CHECK: next:
; CHECK-NEXT: [[TMP0:%.*]] = icmp ugt i64 [[B]], [[ADD]]
; CHECK-NEXT: [[Q:%.*]] = select i1 [[TMP0]], i64 [[B]], i64 42
; CHECK-NEXT: ret i64 [[Q]]
; CHECK: exit:
; CHECK-NEXT: ret i64 0
;
entry:
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  br i1 %c, label %next, label %exit
next:
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
exit:
  ret i64 0
}

define i64 @uaddo5(i64 %a, i64 %b, i64* %ptr, i1 %c) nounwind ssp {
; CHECK-LABEL: @uaddo5(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT: store i64 [[ADD]], i64* [[PTR:%.*]]
; CHECK-NEXT: br i1 [[C:%.*]], label [[NEXT:%.*]], label [[EXIT:%.*]]
; CHECK: next:
; CHECK-NEXT: [[TMP0:%.*]] = icmp ugt i64 [[B]], [[ADD]]
; CHECK-NEXT: [[Q:%.*]] = select i1 [[TMP0]], i64 [[B]], i64 42
; CHECK-NEXT: ret i64 [[Q]]
; CHECK: exit:
; CHECK-NEXT: ret i64 0
;
entry:
  %add = add i64 %b, %a
  store i64 %add, i64* %ptr
  %cmp = icmp ugt i64 %b, %add
  br i1 %c, label %next, label %exit
next:
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
exit:
  ret i64 0
}

; When adding 1, the general pattern for add-overflow may be different due to icmp canonicalization.
; PR31754: https://bugs.llvm.org/show_bug.cgi?id=31754
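; After canonicalization, the overflow check for an increment becomes a compare
; against zero: x + 1 wraps exactly when the result is 0 (equivalently, when
; x == -1), so the matcher must recognize `icmp eq (add x, 1), 0` as uaddo.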
define i1 @uaddo_i64_increment(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_increment(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 1)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %a = add i64 %x, 1
  %ov = icmp eq i64 %a, 0
  store i64 %a, i64* %p
  ret i1 %ov
}

define i1 @uaddo_i8_increment_noncanonical_1(i8 %x, i8* %p) {
; CHECK-LABEL: @uaddo_i8_increment_noncanonical_1(
; CHECK-NEXT: [[TMP1:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 1, i8 [[X:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i8, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1
; CHECK-NEXT: store i8 [[MATH]], i8* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %a = add i8 1, %x ; commute
  %ov = icmp eq i8 %a, 0
  store i8 %a, i8* %p
  ret i1 %ov
}

define i1 @uaddo_i32_increment_noncanonical_2(i32 %x, i32* %p) {
; CHECK-LABEL: @uaddo_i32_increment_noncanonical_2(
; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 1)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT: store i32 [[MATH]], i32* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %a = add i32 %x, 1
  %ov = icmp eq i32 0, %a ; commute
  store i32 %a, i32* %p
  ret i1 %ov
}

define i1 @uaddo_i16_increment_noncanonical_3(i16 %x, i16* %p) {
; CHECK-LABEL: @uaddo_i16_increment_noncanonical_3(
; CHECK-NEXT: [[TMP1:%.*]] = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 1, i16 [[X:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
; CHECK-NEXT: store i16 [[MATH]], i16* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %a = add i16 1, %x ; commute
  %ov = icmp eq i16 0, %a ; commute
  store i16 %a, i16* %p
  ret i1 %ov
}

; The overflow check may be against the input rather than the sum.
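; x + 1 overflows iff x was already the all-ones value, so `icmp eq %x, -1`
; is an equivalent overflow check that never mentions the sum.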
define i1 @uaddo_i64_increment_alt(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_increment_alt(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 1)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %a = add i64 %x, 1
  store i64 %a, i64* %p
  %ov = icmp eq i64 %x, -1
  ret i1 %ov
}

; Make sure insertion is done correctly based on dominance.
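; Here the compare comes before the add, so the intrinsic call must be placed
; at the earlier (dominating) position for both extractvalue uses to be valid.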
define i1 @uaddo_i64_increment_alt_dom(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_increment_alt_dom(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 1)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %ov = icmp eq i64 %x, -1
  %a = add i64 %x, 1
  store i64 %a, i64* %p
  ret i1 %ov
}

; The overflow check may be against the input rather than the sum.
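; A decrement written as `add %x, -1` is x + (unsigned)-1, which wraps for
; every x except 0, so `icmp ne %x, 0` is the overflow bit.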
define i1 @uaddo_i64_decrement_alt(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_decrement_alt(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 -1)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %a = add i64 %x, -1
  store i64 %a, i64* %p
  %ov = icmp ne i64 %x, 0
  ret i1 %ov
}

; Make sure insertion is done correctly based on dominance.
define i1 @uaddo_i64_decrement_alt_dom(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_decrement_alt_dom(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 -1)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %ov = icmp ne i64 %x, 0
  %a = add i64 %x, -1
  store i64 %a, i64* %p
  ret i1 %ov
}

; No transform for illegal types.
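; i42 is not a legal integer type for this target (datalayout native widths are
; n8:16:32:64), so the pattern is left as-is instead of forming an intrinsic.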
define i1 @uaddo_i42_increment_illegal_type(i42 %x, i42* %p) {
; CHECK-LABEL: @uaddo_i42_increment_illegal_type(
; CHECK-NEXT: [[A:%.*]] = add i42 [[X:%.*]], 1
; CHECK-NEXT: [[OV:%.*]] = icmp eq i42 [[A]], 0
; CHECK-NEXT: store i42 [[A]], i42* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV]]
;
  %a = add i42 %x, 1
  %ov = icmp eq i42 %a, 0
  store i42 %a, i42* %p
  ret i1 %ov
}

define i1 @usubo_ult_i64(i64 %x, i64 %y, i64* %p) {
; CHECK-LABEL: @usubo_ult_i64(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[X:%.*]], i64 [[Y:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %s = sub i64 %x, %y
  store i64 %s, i64* %p
  %ov = icmp ult i64 %x, %y
  ret i1 %ov
}

; Verify insertion point for single-BB. Toggle predicate.
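; usubo's overflow bit is the borrow: x - y wraps iff x u< y, which is the same
; condition as y u> x, so both predicate spellings should be matched.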
define i1 @usubo_ugt_i32(i32 %x, i32 %y, i32* %p) {
; CHECK-LABEL: @usubo_ugt_i32(
; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT: store i32 [[MATH]], i32* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %ov = icmp ugt i32 %y, %x
  %s = sub i32 %x, %y
  store i32 %s, i32* %p
  ret i1 %ov
}

; Constant operand should match.
define i1 @usubo_ugt_constant_op0_i8(i8 %x, i8* %p) {
; CHECK-LABEL: @usubo_ugt_constant_op0_i8(
; CHECK-NEXT: [[TMP1:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 42, i8 [[X:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i8, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1
; CHECK-NEXT: store i8 [[MATH]], i8* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %s = sub i8 42, %x
  %ov = icmp ugt i8 %x, 42
  store i8 %s, i8* %p
  ret i1 %ov
}

; Compare with constant operand 0 is canonicalized by commuting, but verify match for non-canonical form.
define i1 @usubo_ult_constant_op0_i16(i16 %x, i16* %p) {
; CHECK-LABEL: @usubo_ult_constant_op0_i16(
; CHECK-NEXT: [[TMP1:%.*]] = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 43, i16 [[X:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
; CHECK-NEXT: store i16 [[MATH]], i16* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %s = sub i16 43, %x
  %ov = icmp ult i16 43, %x
  store i16 %s, i16* %p
  ret i1 %ov
}

; Subtract with constant operand 1 is canonicalized to add.
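; A subtract by a constant usually reaches CGP as `add %x, -C`; the matcher
; still has to treat it as usubo(%x, C) when paired with `icmp ult %x, C`.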
define i1 @usubo_ult_constant_op1_i16(i16 %x, i16* %p) {
; CHECK-LABEL: @usubo_ult_constant_op1_i16(
; CHECK-NEXT: [[TMP1:%.*]] = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 [[X:%.*]], i16 44)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
; CHECK-NEXT: store i16 [[MATH]], i16* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %s = add i16 %x, -44
  %ov = icmp ult i16 %x, 44
  store i16 %s, i16* %p
  ret i1 %ov
}

define i1 @usubo_ugt_constant_op1_i8(i8 %x, i8* %p) {
; CHECK-LABEL: @usubo_ugt_constant_op1_i8(
; CHECK-NEXT: [[TMP1:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[X:%.*]], i8 45)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i8, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1
; CHECK-NEXT: store i8 [[MATH]], i8* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %ov = icmp ugt i8 45, %x
  %s = add i8 %x, -45
  store i8 %s, i8* %p
  ret i1 %ov
}

; Special-case: subtract 1 changes the compare predicate and constant.
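; x - 1 borrows exactly when x == 0, so the usual `icmp ult %x, 1` check shows
; up canonicalized as `icmp eq %x, 0`.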
define i1 @usubo_eq_constant1_op1_i32(i32 %x, i32* %p) {
; CHECK-LABEL: @usubo_eq_constant1_op1_i32(
; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[X:%.*]], i32 1)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT: store i32 [[MATH]], i32* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %s = add i32 %x, -1
  %ov = icmp eq i32 %x, 0
  store i32 %s, i32* %p
  ret i1 %ov
}

; Special-case: subtract from 0 (negate) changes the compare predicate.
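; 0 - x borrows for every non-zero x, so the overflow check appears as
; `icmp ne %x, 0`.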
define i1 @usubo_ne_constant0_op1_i32(i32 %x, i32* %p) {
; CHECK-LABEL: @usubo_ne_constant0_op1_i32(
; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 0, i32 [[X:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT: store i32 [[MATH]], i32* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %s = sub i32 0, %x
  %ov = icmp ne i32 %x, 0
  store i32 %s, i32* %p
  ret i1 %ov
}

; This used to verify insertion point for multi-BB, but now we just bail out.
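; In @usubo_ult_sub_dominates_i64 the sub dominates the compare, but CGP no
; longer forms the intrinsic in that multi-block shape; the CHECK lines below
; show the IR left untouched.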
declare void @call(i1)

define i1 @usubo_ult_sub_dominates_i64(i64 %x, i64 %y, i64* %p, i1 %cond) {
; CHECK-LABEL: @usubo_ult_sub_dominates_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
; CHECK: t:
; CHECK-NEXT: [[S:%.*]] = sub i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: store i64 [[S]], i64* [[P:%.*]]
; CHECK-NEXT: br i1 [[COND]], label [[END:%.*]], label [[F]]
; CHECK: f:
; CHECK-NEXT: ret i1 [[COND]]
; CHECK: end:
; CHECK-NEXT: [[OV:%.*]] = icmp ult i64 [[X]], [[Y]]
; CHECK-NEXT: ret i1 [[OV]]
;
entry:
  br i1 %cond, label %t, label %f
t:
  %s = sub i64 %x, %y
  store i64 %s, i64* %p
  br i1 %cond, label %end, label %f
f:
  ret i1 %cond
end:
  %ov = icmp ult i64 %x, %y
  ret i1 %ov
}

define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, i64* %p, i1 %cond) {
; CHECK-LABEL: @usubo_ult_cmp_dominates_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
; CHECK: t:
; CHECK-NEXT: [[OV:%.*]] = icmp ult i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: call void @call(i1 [[OV]])
; CHECK-NEXT: br i1 [[OV]], label [[END:%.*]], label [[F]]
; CHECK: f:
; CHECK-NEXT: ret i1 [[COND]]
; CHECK: end:
; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[X]], i64 [[Y]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
; CHECK-NEXT: store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
entry:
  br i1 %cond, label %t, label %f
t:
  %ov = icmp ult i64 %x, %y
  call void @call(i1 %ov)
  br i1 %ov, label %end, label %f
f:
  ret i1 %cond
end:
  %s = sub i64 %x, %y
  store i64 %s, i64* %p
  ret i1 %ov
}

; Verify that crazy/non-canonical code does not crash.
define void @bar() {
; CHECK-LABEL: @bar(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 1, -1
; CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
; CHECK-NEXT: unreachable
;
  %cmp = icmp eq i64 1, -1
  %frombool = zext i1 %cmp to i8
  unreachable
}

define void @foo() {
; CHECK-LABEL: @foo(
; CHECK-NEXT: [[SUB:%.*]] = add nsw i64 1, 1
; CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[SUB]] to i32
; CHECK-NEXT: unreachable
;
  %sub = add nsw i64 1, 1
  %conv = trunc i64 %sub to i32
  unreachable
}

; Similarly for usubo.
define i1 @bar2() {
; CHECK-LABEL: @bar2(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 1, 0
; CHECK-NEXT: ret i1 [[CMP]]
;
  %cmp = icmp eq i64 1, 0
  ret i1 %cmp
}

define i64 @foo2(i8 *%p) {
; CHECK-LABEL: @foo2(
; CHECK-NEXT: [[SUB:%.*]] = add nsw i64 1, -1
; CHECK-NEXT: ret i64 [[SUB]]
;
  %sub = add nsw i64 1, -1
  ret i64 %sub
}

; Avoid hoisting a math op into a dominating block which would
; increase the critical path.
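; In @PR41129 the zero-compare sits in %entry while the decrement sits in
; %true; forming usubo would pull the math up into %entry and execute it on the
; %false path as well, so the pattern is intentionally left alone.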
define void @PR41129(i64* %p64) {
; CHECK-LABEL: @PR41129(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[KEY:%.*]] = load i64, i64* [[P64:%.*]], align 8
; CHECK-NEXT: [[COND17:%.*]] = icmp eq i64 [[KEY]], 0
; CHECK-NEXT: br i1 [[COND17]], label [[TRUE:%.*]], label [[FALSE:%.*]]
; CHECK: false:
; CHECK-NEXT: [[ANDVAL:%.*]] = and i64 [[KEY]], 7
; CHECK-NEXT: store i64 [[ANDVAL]], i64* [[P64]]
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: true:
; CHECK-NEXT: [[SVALUE:%.*]] = add i64 [[KEY]], -1
; CHECK-NEXT: store i64 [[SVALUE]], i64* [[P64]]
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  %key = load i64, i64* %p64, align 8
  %cond17 = icmp eq i64 %key, 0
  br i1 %cond17, label %true, label %false
false:
  %andval = and i64 %key, 7
  store i64 %andval, i64* %p64
  br label %exit
true:
  %svalue = add i64 %key, -1
  store i64 %svalue, i64* %p64
  br label %exit
exit:
  ret void
}

; Check that every instruction inserted by -codegenprepare has a debug location.
; DEBUG: CheckModuleDebugify: PASS