
[AtomicExpand] Allow libcall expansion for non-zero address spaces

Be consistent about how we treat atomics in non-zero address spaces.  If we get to the backend, we tend to lower them as if in address space 0.  Do the same if we need to insert a libcall instead.

Differential Revision: https://reviews.llvm.org/D58760



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@355453 91177308-0d34-0410-b5e6-96231b3b80d8
Philip Reames 6 years ago
parent
commit
b58c983628

+ 8 - 2
lib/CodeGen/AtomicExpandPass.cpp

@@ -1691,8 +1691,14 @@ bool AtomicExpand::expandAtomicOpToLibcall(
   }
 
   // 'ptr' argument.
-  Value *PtrVal =
-      Builder.CreateBitCast(PointerOperand, Type::getInt8PtrTy(Ctx));
+  // note: This assumes all address spaces share a common libfunc
+  // implementation and that addresses are convertable.  For systems without
+  // that property, we'd need to extend this mechanism to support AS-specific
+  // families of atomic intrinsics.
+  auto PtrTypeAS = PointerOperand->getType()->getPointerAddressSpace();
+  Value *PtrVal = Builder.CreateBitCast(PointerOperand,
+                                        Type::getInt8PtrTy(Ctx, PtrTypeAS));
+  PtrVal = Builder.CreateAddrSpaceCast(PtrVal, Type::getInt8PtrTy(Ctx));
   Args.push_back(PtrVal);
 
   // 'expected' argument, if present.
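
For readers skimming the hunk above, here is a minimal standalone sketch of the IRBuilder sequence it introduces. The helper name emitLibcallPtrArg is purely illustrative and not part of the patch; Builder, PointerOperand and Ctx are assumed to be the locals the pass already has in expandAtomicOpToLibcall. IRBuilder folds the addrspacecast into a no-op when the pointer is already in address space 0, which is why the plain AS0 test below shows only the bitcast.

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Sketch only: build the i8* 'ptr' argument for an __atomic_* libcall from a
// pointer that may live in a non-zero address space.
static Value *emitLibcallPtrArg(IRBuilder<> &Builder, Value *PointerOperand,
                                LLVMContext &Ctx) {
  // Address space of the incoming pointer (1 in the _as1 test below).
  unsigned PtrAS = PointerOperand->getType()->getPointerAddressSpace();
  // Bitcast to i8* within the original address space...
  Value *PtrVal =
      Builder.CreateBitCast(PointerOperand, Type::getInt8PtrTy(Ctx, PtrAS));
  // ...then addrspacecast down to the default (0) address space that the
  // runtime's __atomic_* entry points take; folded away when PtrAS is 0.
  return Builder.CreateAddrSpaceCast(PtrVal, Type::getInt8PtrTy(Ctx));
}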

+ 36 - 0
test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll

@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=i686-linux-gnu -atomic-expand %s | FileCheck %s
+
+
+define i256 @atomic_load256_libcall(i256* %ptr) nounwind {
+; CHECK-LABEL: @atomic_load256_libcall(
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i256* [[PTR:%.*]] to i8*
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i256, align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i256* [[TMP2]] to i8*
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 32, i8* [[TMP3]])
+; CHECK-NEXT:    call void @__atomic_load(i64 32, i8* [[TMP1]], i8* [[TMP3]], i32 0)
+; CHECK-NEXT:    [[TMP4:%.*]] = load i256, i256* [[TMP2]], align 8
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 32, i8* [[TMP3]])
+; CHECK-NEXT:    ret i256 [[TMP4]]
+;
+  %result = load atomic i256, i256* %ptr unordered, align 16
+  ret i256 %result
+}
+
+define i256 @atomic_load256_libcall_as1(i256 addrspace(1)* %ptr) nounwind {
+; CHECK-LABEL: @atomic_load256_libcall_as1(
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i256 addrspace(1)* [[PTR:%.*]] to i8 addrspace(1)*
+; CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast i8 addrspace(1)* [[TMP1]] to i8*
+; CHECK-NEXT:    [[TMP3:%.*]] = alloca i256, align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i256* [[TMP3]] to i8*
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 32, i8* [[TMP4]])
+; CHECK-NEXT:    call void @__atomic_load(i64 32, i8* [[TMP2]], i8* [[TMP4]], i32 0)
+; CHECK-NEXT:    [[TMP5:%.*]] = load i256, i256* [[TMP3]], align 8
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 32, i8* [[TMP4]])
+; CHECK-NEXT:    ret i256 [[TMP5]]
+;
+  %result = load atomic i256, i256 addrspace(1)* %ptr unordered, align 16
+  ret i256 %result
+}
+
+; CHECK: fail