Browse code

Revert "[OPENMP] Delayed diagnostics for VLA support."

This reverts commit r354679 to fix the problem with the Windows
buildbots

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@354680 91177308-0d34-0410-b5e6-96231b3b80d8
Alexey Bataev 6 years ago
Parent
Current commit
e8de81fa9a
3 changed files with 12 additions and 20 deletions
  1. 9 7
      lib/Sema/SemaType.cpp
  2. 1 4
      test/OpenMP/target_vla_messages.cpp
  3. 2 9
      test/SemaCUDA/vla.cu

+ 9 - 7
lib/Sema/SemaType.cpp

@@ -2250,13 +2250,15 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
   }
   }
 
 
   if (T->isVariableArrayType() && !Context.getTargetInfo().isVLASupported()) {
   if (T->isVariableArrayType() && !Context.getTargetInfo().isVLASupported()) {
-    // CUDA device code and some other targets don't support VLAs.
-    targetDiag(Loc, (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
-                        ? diag::err_cuda_vla
-                        : diag::err_vla_unsupported)
-        << ((getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
-                ? CurrentCUDATarget()
-                : CFT_InvalidTarget);
+    if (getLangOpts().CUDA) {
+      // CUDA device code doesn't support VLAs.
+      CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget();
+    } else if (!getLangOpts().OpenMP ||
+               shouldDiagnoseTargetSupportFromOpenMP()) {
+      // Some targets don't support VLAs.
+      Diag(Loc, diag::err_vla_unsupported);
+      return QualType();
+    }
   }
   }
 
 
   // If this is not C99, extwarn about VLA's and C99 array size modifiers.
   // If this is not C99, extwarn about VLA's and C99 array size modifiers.

+ 1 - 4
test/OpenMP/target_vla_messages.cpp

@@ -47,7 +47,7 @@ void target_template(int arg) {
 #pragma omp target
 #pragma omp target
   {
   {
 #ifdef NO_VLA
 #ifdef NO_VLA
-    // expected-error@+2 2 {{variable length arrays are not supported for the current target}}
+    // expected-error@+2 {{variable length arrays are not supported for the current target}}
 #endif
 #endif
     T vla[arg];
     T vla[arg];
   }
   }
@@ -73,9 +73,6 @@ void target(int arg) {
     }
     }
   }
   }
 
 
-#ifdef NO_VLA
-    // expected-note@+2 {{in instantiation of function template specialization 'target_template<long>' requested here}}
-#endif
   target_template<long>(arg);
   target_template<long>(arg);
 }
 }
 
 

+ 2 - 9
test/SemaCUDA/vla.cu

@@ -1,9 +1,5 @@
 // RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device -verify %s
 // RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device -verify %s
-// RUN: %clang_cc1 -triple x86_64-unknown-linux -verify -DHOST %s
-
-#ifndef __CUDA_ARCH__
-// expected-no-diagnostics
-#endif
+// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -verify -DHOST %s
 
 
 #include "Inputs/cuda.h"
 #include "Inputs/cuda.h"
 
 
@@ -12,10 +8,7 @@ void host(int n) {
 }
 }
 
 
 __device__ void device(int n) {
 __device__ void device(int n) {
-  int x[n];
-#ifdef __CUDA_ARCH__
-  // expected-error@-2 {{cannot use variable-length arrays in __device__ functions}}
-#endif
+  int x[n];  // expected-error {{cannot use variable-length arrays in __device__ functions}}
 }
 }
 
 
 __host__ __device__ void hd(int n) {
 __host__ __device__ void hd(int n) {