[MS] Re-add support for the ARM interlocked bittest intrinsics

Adds support for these intrinsics, which are ARM and ARM64 only:
  _interlockedbittestandreset_acq
  _interlockedbittestandreset_rel
  _interlockedbittestandreset_nf
  _interlockedbittestandset_acq
  _interlockedbittestandset_rel
  _interlockedbittestandset_nf

Refactor the bittest intrinsic handling to decompose each intrinsic into
its action, its width, and its atomicity.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@334239 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Reid Kleckner
Date:   2018-06-07 21:39:04 +00:00
Commit: c1c07cca8c (parent 2dbbac4ddd)
7 changed files with 300 additions and 90 deletions
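
For reference, a minimal usage sketch of the new ordering-suffixed intrinsics
(editor's illustration, not part of the commit; assumes an ARM or ARM64
MSVC-mode target and <intrin.h>):

    #include <intrin.h>

    /* Atomically set bit 5 of *flags with acquire ordering; the result is
       the previous value of that bit (0 or 1). */
    unsigned char set_flag_acquire(long volatile *flags) {
      return _interlockedbittestandset_acq(flags, 5);
    }

    /* Atomically clear bit 5 of *flags with release ordering. */
    unsigned char clear_flag_release(long volatile *flags) {
      return _interlockedbittestandreset_rel(flags, 5);
    }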

@@ -791,10 +791,16 @@ LANGBUILTIN(_InterlockedOr, "NiNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedXor8, "ccD*c", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedXor16, "ssD*s", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedXor, "NiNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_interlockedbittestandreset, "UcNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_interlockedbittestandreset64, "UcWiD*Wi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_interlockedbittestandset, "UcNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_interlockedbittestandset64, "UcWiD*Wi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_interlockedbittestandreset, "UcNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_interlockedbittestandreset64, "UcWiD*Wi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_interlockedbittestandreset_acq, "UcNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_interlockedbittestandreset_nf, "UcNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_interlockedbittestandreset_rel, "UcNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_interlockedbittestandset, "UcNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_interlockedbittestandset64, "UcWiD*Wi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_interlockedbittestandset_acq, "UcNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_interlockedbittestandset_nf, "UcNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_interlockedbittestandset_rel, "UcNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__noop, "i.", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__popcnt16, "UsUs", "nc", ALL_MS_LANGUAGES)
LANGBUILTIN(__popcnt, "UiUi", "nc", ALL_MS_LANGUAGES)
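
As a rough key to the signature strings above (my reading of the type-letter
encoding documented at the top of Builtins.def): 'Uc' is an unsigned char
return, 'Ni' is an int sized like the MSVC 'long', 'D' marks volatile, '*' a
pointer, and 'Wi' is a 64-bit int. So "UcNiD*Ni" corresponds to the prototype
declared in intrin.h later in this patch:

    unsigned char _interlockedbittestandset_acq(long volatile *_BitBase,
                                                long _BitPos);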

@@ -8154,6 +8154,8 @@ def err_x86_builtin_invalid_rounding : Error<
"invalid rounding argument">;
def err_x86_builtin_invalid_scale : Error<
"scale argument must be 1, 2, 4, or 8">;
def err_builtin_target_unsupported : Error<
"builtin is not supported on this target">;
def err_builtin_longjmp_unsupported : Error<
"__builtin_longjmp is not supported for the current target">;

@@ -484,58 +484,99 @@ CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown});
}
// Get properties of an X86 BT* assembly instruction. The first returned value
// is the action character code, which can be for complement, reset, or set. The
// second is the size suffix which our assembler needs. The last is whether to
// add the lock prefix.
static std::tuple<char, char, bool>
getBitTestActionSizeAndLocking(unsigned BuiltinID) {
switch (BuiltinID) {
case Builtin::BI_bittest:
return std::make_tuple('\0', 'l', false);
case Builtin::BI_bittestandcomplement:
return std::make_tuple('c', 'l', false);
case Builtin::BI_bittestandreset:
return std::make_tuple('r', 'l', false);
case Builtin::BI_bittestandset:
return std::make_tuple('s', 'l', false);
case Builtin::BI_interlockedbittestandreset:
return std::make_tuple('r', 'l', /*Locked=*/true);
case Builtin::BI_interlockedbittestandset:
return std::make_tuple('s', 'l', /*Locked=*/true);
namespace {
/// A struct to generically describe a bit test intrinsic.
struct BitTest {
enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
enum InterlockingKind : uint8_t {
Unlocked,
Sequential,
Acquire,
Release,
NoFence
};
ActionKind Action;
InterlockingKind Interlocking;
bool Is64Bit;
static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};
} // namespace
BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
switch (BuiltinID) {
// Main portable variants.
case Builtin::BI_bittest:
return {TestOnly, Unlocked, false};
case Builtin::BI_bittestandcomplement:
return {Complement, Unlocked, false};
case Builtin::BI_bittestandreset:
return {Reset, Unlocked, false};
case Builtin::BI_bittestandset:
return {Set, Unlocked, false};
case Builtin::BI_interlockedbittestandreset:
return {Reset, Sequential, false};
case Builtin::BI_interlockedbittestandset:
return {Set, Sequential, false};
// X86-specific 64-bit variants.
case Builtin::BI_bittest64:
return std::make_tuple('\0', 'q', false);
return {TestOnly, Unlocked, true};
case Builtin::BI_bittestandcomplement64:
return std::make_tuple('c', 'q', false);
return {Complement, Unlocked, true};
case Builtin::BI_bittestandreset64:
return std::make_tuple('r', 'q', false);
return {Reset, Unlocked, true};
case Builtin::BI_bittestandset64:
return std::make_tuple('s', 'q', false);
return {Set, Unlocked, true};
case Builtin::BI_interlockedbittestandreset64:
return std::make_tuple('r', 'q', /*Locked=*/true);
return {Reset, Sequential, true};
case Builtin::BI_interlockedbittestandset64:
return std::make_tuple('s', 'q', /*Locked=*/true);
return {Set, Sequential, true};
// ARM/AArch64-specific ordering variants.
case Builtin::BI_interlockedbittestandset_acq:
return {Set, Acquire, false};
case Builtin::BI_interlockedbittestandset_rel:
return {Set, Release, false};
case Builtin::BI_interlockedbittestandset_nf:
return {Set, NoFence, false};
case Builtin::BI_interlockedbittestandreset_acq:
return {Reset, Acquire, false};
case Builtin::BI_interlockedbittestandreset_rel:
return {Reset, Release, false};
case Builtin::BI_interlockedbittestandreset_nf:
return {Reset, NoFence, false};
}
llvm_unreachable("expected only bittest builtins");
llvm_unreachable("expected only bittest intrinsics");
}
static RValue EmitX86BitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID,
const CallExpr *E, Value *BitBase,
Value *BitPos) {
char Action, Size;
bool Locked;
std::tie(Action, Size, Locked) = getBitTestActionSizeAndLocking(BuiltinID);
static char bitActionToX86BTCode(BitTest::ActionKind A) {
switch (A) {
case BitTest::TestOnly: return '\0';
case BitTest::Complement: return 'c';
case BitTest::Reset: return 'r';
case BitTest::Set: return 's';
}
llvm_unreachable("invalid action");
}
static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
BitTest BT,
const CallExpr *E, Value *BitBase,
Value *BitPos) {
char Action = bitActionToX86BTCode(BT.Action);
char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
// Build the assembly.
SmallString<64> Asm;
raw_svector_ostream AsmOS(Asm);
if (Locked)
if (BT.Interlocking != BitTest::Unlocked)
AsmOS << "lock ";
AsmOS << "bt";
if (Action)
AsmOS << Action;
AsmOS << Size << " $2, ($1)\n\tsetc ${0:b}";
AsmOS << SizeSuffix << " $2, ($1)\n\tsetc ${0:b}";
// Build the constraints. FIXME: We should support immediates when possible.
std::string Constraints = "=r,r,r,~{cc},~{flags},~{fpsr}";
@@ -548,24 +589,38 @@ static RValue EmitX86BitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID,
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*SideEffects=*/true);
CallSite CS = CGF.Builder.CreateCall(IA, {BitBase, BitPos});
return RValue::get(CS.getInstruction());
return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
}
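// (Editor's note, not part of the patch: for _interlockedbittestandset on
// 32-bit x86 the string built above comes out as
//   "lock btsl $2, ($1)\n\tsetc ${0:b}"
// i.e. a locked BTS, which handles the array indexing in hardware, followed
// by SETC to capture the bit's previous value from the carry flag.)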
static llvm::AtomicOrdering
getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
switch (I) {
case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
case BitTest::Release: return llvm::AtomicOrdering::Release;
case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
}
llvm_unreachable("invalid interlocking");
}
/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
/// bits and a bit position and read and optionally modify the bit at that
/// position. The position index can be arbitrarily large, i.e. it can be larger
/// than 31 or 63, so we need an indexed load in the general case.
static RValue EmitBitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID,
const CallExpr *E) {
static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
unsigned BuiltinID,
const CallExpr *E) {
Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
// X86 has special BT, BTC, BTR, and BTS instructions that handle the array
// indexing operation internally. Use them if possible.
llvm::Triple::ArchType Arch = CGF.getTarget().getTriple().getArch();
if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64)
return EmitX86BitTestIntrinsic(CGF, BuiltinID, E, BitBase, BitPos);
return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
// Otherwise, use generic code to load one byte and test the bit. Use all but
// the bottom three bits as the array index, and the bottom three bits to form
@@ -583,54 +638,42 @@ static RValue EmitBitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID,
// The updating instructions will need a mask.
Value *Mask = nullptr;
if (BuiltinID != Builtin::BI_bittest && BuiltinID != Builtin::BI_bittest64) {
if (BT.Action != BitTest::TestOnly) {
Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
"bittest.mask");
}
// Emit a combined atomicrmw load/store operation for the interlocked
// intrinsics.
Value *OldByte = nullptr;
switch (BuiltinID) {
case Builtin::BI_interlockedbittestandreset:
case Builtin::BI_interlockedbittestandreset64:
OldByte = CGF.Builder.CreateAtomicRMW(
AtomicRMWInst::And, ByteAddr.getPointer(), CGF.Builder.CreateNot(Mask),
llvm::AtomicOrdering::SequentiallyConsistent);
break;
case Builtin::BI_interlockedbittestandset:
case Builtin::BI_interlockedbittestandset64:
OldByte = CGF.Builder.CreateAtomicRMW(
AtomicRMWInst::Or, ByteAddr.getPointer(), Mask,
llvm::AtomicOrdering::SequentiallyConsistent);
break;
default:
break;
}
// Check the action and ordering of the interlocked intrinsics.
llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
// Emit a plain load for the non-interlocked intrinsics.
if (!OldByte) {
Value *OldByte = nullptr;
if (Ordering != llvm::AtomicOrdering::NotAtomic) {
// Emit a combined atomicrmw load/store operation for the interlocked
// intrinsics.
llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
if (BT.Action == BitTest::Reset) {
Mask = CGF.Builder.CreateNot(Mask);
RMWOp = llvm::AtomicRMWInst::And;
}
OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
Ordering);
} else {
// Emit a plain load for the non-interlocked intrinsics.
OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
Value *NewByte = nullptr;
switch (BuiltinID) {
case Builtin::BI_bittest:
case Builtin::BI_bittest64:
switch (BT.Action) {
case BitTest::TestOnly:
// Don't store anything.
break;
case Builtin::BI_bittestandcomplement:
case Builtin::BI_bittestandcomplement64:
case BitTest::Complement:
NewByte = CGF.Builder.CreateXor(OldByte, Mask);
break;
case Builtin::BI_bittestandreset:
case Builtin::BI_bittestandreset64:
case BitTest::Reset:
NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
break;
case Builtin::BI_bittestandset:
case Builtin::BI_bittestandset64:
case BitTest::Set:
NewByte = CGF.Builder.CreateOr(OldByte, Mask);
break;
default:
llvm_unreachable("non bittest family builtin");
}
if (NewByte)
CGF.Builder.CreateStore(NewByte, ByteAddr);
@@ -639,8 +682,8 @@ static RValue EmitBitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID,
// However we loaded the old byte, either by plain load or atomicrmw, shift
// the bit into the low position and mask it to 0 or 1.
Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
return RValue::get(CGF.Builder.CreateAnd(
ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res"));
return CGF.Builder.CreateAnd(
ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
}
namespace {
@@ -2992,7 +3035,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI_interlockedbittestandreset64:
case Builtin::BI_interlockedbittestandset64:
case Builtin::BI_interlockedbittestandset:
return EmitBitTestIntrinsic(*this, BuiltinID, E);
case Builtin::BI_interlockedbittestandset_acq:
case Builtin::BI_interlockedbittestandset_rel:
case Builtin::BI_interlockedbittestandset_nf:
case Builtin::BI_interlockedbittestandreset_acq:
case Builtin::BI_interlockedbittestandreset_rel:
case Builtin::BI_interlockedbittestandreset_nf:
return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
case Builtin::BI__exception_code:
case Builtin::BI_exception_code:
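
(Editor's sketch, not part of the patch.) On non-x86 targets the code above
lowers the interlocked forms to an atomicrmw on the byte that contains the
bit. A rough C model of what _interlockedbittestandset_acq(base, idx) turns
into, written with the Clang/GCC __atomic builtins:

    #include <stdint.h>

    static unsigned char
    model_interlockedbittestandset_acq(long volatile *base, long idx) {
      volatile uint8_t *bytes = (volatile uint8_t *)base;
      volatile uint8_t *byteaddr = &bytes[idx >> 3];   /* bittest.byteaddr */
      uint8_t mask = (uint8_t)(1u << (idx & 7));       /* bittest.mask     */
      /* Atomic OR of the mask with acquire ordering; returns the old byte. */
      uint8_t oldbyte = __atomic_fetch_or(byteaddr, mask, __ATOMIC_ACQUIRE);
      return (oldbyte >> (idx & 7)) & 1;               /* bittest.res      */
    }

The _rel and _nf suffixes map the same way to __ATOMIC_RELEASE and
__ATOMIC_RELAXED (monotonic in LLVM terms), and the reset variants use an
atomic AND with the inverted mask, matching getBitTestAtomicOrdering and the
RMWOp selection above.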

@@ -521,6 +521,23 @@ _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask) {
}
#endif
/*----------------------------------------------------------------------------*\
|* Bit Counting and Testing
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
unsigned char _interlockedbittestandset_acq(long volatile *_BitBase,
long _BitPos);
unsigned char _interlockedbittestandset_nf(long volatile *_BitBase,
long _BitPos);
unsigned char _interlockedbittestandset_rel(long volatile *_BitBase,
long _BitPos);
unsigned char _interlockedbittestandreset_acq(long volatile *_BitBase,
long _BitPos);
unsigned char _interlockedbittestandreset_nf(long volatile *_BitBase,
long _BitPos);
unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase,
long _BitPos);
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Or
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)

@@ -851,6 +851,20 @@ static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
return false;
}
// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
llvm::Triple::ArchType CurArch =
S.getASTContext().getTargetInfo().getTriple().getArch();
if (llvm::is_contained(SupportedArchs, CurArch))
return false;
S.Diag(TheCall->getLocStart(), diag::err_builtin_target_unsupported)
<< TheCall->getSourceRange();
return true;
}
ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
CallExpr *TheCall) {
@@ -901,6 +915,33 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
}
break;
}
// The acquire, release, and no fence variants are ARM and AArch64 only.
case Builtin::BI_interlockedbittestandset_acq:
case Builtin::BI_interlockedbittestandset_rel:
case Builtin::BI_interlockedbittestandset_nf:
case Builtin::BI_interlockedbittestandreset_acq:
case Builtin::BI_interlockedbittestandreset_rel:
case Builtin::BI_interlockedbittestandreset_nf:
if (CheckBuiltinTargetSupport(
*this, BuiltinID, TheCall,
{llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
return ExprError();
break;
// The 64-bit bittest variants are x64, ARM, and AArch64 only.
case Builtin::BI_bittest64:
case Builtin::BI_bittestandcomplement64:
case Builtin::BI_bittestandreset64:
case Builtin::BI_bittestandset64:
case Builtin::BI_interlockedbittestandreset64:
case Builtin::BI_interlockedbittestandset64:
if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
{llvm::Triple::x86_64, llvm::Triple::arm,
llvm::Triple::thumb, llvm::Triple::aarch64}))
return ExprError();
break;
case Builtin::BI__builtin_isgreater:
case Builtin::BI__builtin_isgreaterequal:
case Builtin::BI__builtin_isless:

@@ -10,7 +10,9 @@ void test32(long *base, long idx) {
sink = _bittestandset(base, idx);
sink = _interlockedbittestandreset(base, idx);
sink = _interlockedbittestandset(base, idx);
sink = _interlockedbittestandset(base, idx);
}
void test64(__int64 *base, __int64 idx) {
sink = _bittest64(base, idx);
sink = _bittestandcomplement64(base, idx);
@@ -20,6 +22,17 @@ void test64(__int64 *base, __int64 idx) {
sink = _interlockedbittestandset64(base, idx);
}
#if defined(_M_ARM) || defined(_M_ARM64)
void test_arm(long *base, long idx) {
sink = _interlockedbittestandreset_acq(base, idx);
sink = _interlockedbittestandreset_rel(base, idx);
sink = _interlockedbittestandreset_nf(base, idx);
sink = _interlockedbittestandset_acq(base, idx);
sink = _interlockedbittestandset_rel(base, idx);
sink = _interlockedbittestandset_nf(base, idx);
}
#endif
// X64-LABEL: define dso_local void @test32(i32* %base, i32 %idx)
// X64: call i8 asm sideeffect "btl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
// X64: call i8 asm sideeffect "btcl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
@@ -110,15 +123,13 @@ void test64(__int64 *base, __int64 idx) {
// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
// ARM: store volatile i8 %[[RES]], i8* @sink, align 1
// ARM-LABEL: define dso_local {{.*}}void @test64(i64* %base, i64 %idx)
// ARM: %[[IDXHI:[^ ]*]] = ashr i64 %{{.*}}, 3
// ARM: %[[BASE:[^ ]*]] = bitcast i64* %{{.*}} to i8*
// ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i64 %[[IDXHI]]
// ARM: %[[IDX8:[^ ]*]] = trunc i64 %{{.*}} to i8
// ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
// ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1
// ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
// ARM: store volatile i8 %[[RES]], i8* @sink, align 1
// ... the rest is the same, but with i64 instead of i32.
// Just look for the atomicrmw instructions.
// ARM-LABEL: define dso_local {{.*}}void @test_arm(i32* %base, i32 %idx)
// ARM: atomicrmw and i8* %{{.*}}, i8 {{.*}} acquire
// ARM: atomicrmw and i8* %{{.*}}, i8 {{.*}} release
// ARM: atomicrmw and i8* %{{.*}}, i8 {{.*}} monotonic
// ARM: atomicrmw or i8* %{{.*}}, i8 {{.*}} acquire
// ARM: atomicrmw or i8* %{{.*}}, i8 {{.*}} release
// ARM: atomicrmw or i8* %{{.*}}, i8 {{.*}} monotonic

@@ -0,0 +1,84 @@
// RUN: %clang_cc1 -ffreestanding -fms-compatibility -fms-compatibility-version=19 -verify %s -triple i686-windows-msvc -fms-extensions -DTEST_X86
// RUN: %clang_cc1 -ffreestanding -fms-compatibility -fms-compatibility-version=19 -verify %s -triple x86_64-windows-msvc -fms-extensions -DTEST_X64
// RUN: %clang_cc1 -ffreestanding -fms-compatibility -fms-compatibility-version=19 -verify %s -triple arm-windows-msvc -fms-extensions -DTEST_ARM
// RUN: %clang_cc1 -ffreestanding -fms-compatibility -fms-compatibility-version=19 -verify %s -triple thumbv7-windows-msvc -fms-extensions -DTEST_ARM
// RUN: %clang_cc1 -ffreestanding -fms-compatibility -fms-compatibility-version=19 -verify %s -triple aarch64-windows-msvc -fms-extensions -DTEST_ARM
#include <intrin.h>
extern unsigned char sink;
#ifdef TEST_X86
void x86(long *bits, __int64 *bits64, long bitidx) {
sink = _bittest(bits, bitidx);
sink = _bittestandcomplement(bits, bitidx);
sink = _bittestandreset(bits, bitidx);
sink = _bittestandset(bits, bitidx);
sink = _interlockedbittestandreset(bits, bitidx);
sink = _interlockedbittestandset(bits, bitidx);
sink = _bittest64(bits64, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _bittestandcomplement64(bits64, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _bittestandreset64(bits64, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _bittestandset64(bits64, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _interlockedbittestandreset64(bits64, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _interlockedbittestandset64(bits64, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _interlockedbittestandreset_acq(bits, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _interlockedbittestandreset_rel(bits, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _interlockedbittestandreset_nf(bits, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _interlockedbittestandset_acq(bits, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _interlockedbittestandset_rel(bits, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _interlockedbittestandset_nf(bits, bitidx); // expected-error {{builtin is not supported on this target}}
}
#endif
#ifdef TEST_X64
void x64(long *bits, __int64 *bits64, long bitidx) {
sink = _bittest(bits, bitidx);
sink = _bittestandcomplement(bits, bitidx);
sink = _bittestandreset(bits, bitidx);
sink = _bittestandset(bits, bitidx);
sink = _interlockedbittestandreset(bits, bitidx);
sink = _interlockedbittestandset(bits, bitidx);
sink = _bittest64(bits64, bitidx);
sink = _bittestandcomplement64(bits64, bitidx);
sink = _bittestandreset64(bits64, bitidx);
sink = _bittestandset64(bits64, bitidx);
sink = _interlockedbittestandreset64(bits64, bitidx);
sink = _interlockedbittestandset64(bits64, bitidx);
sink = _interlockedbittestandreset_acq(bits, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _interlockedbittestandreset_rel(bits, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _interlockedbittestandreset_nf(bits, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _interlockedbittestandset_acq(bits, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _interlockedbittestandset_rel(bits, bitidx); // expected-error {{builtin is not supported on this target}}
sink = _interlockedbittestandset_nf(bits, bitidx); // expected-error {{builtin is not supported on this target}}
}
#endif
#ifdef TEST_ARM
// expected-no-diagnostics
void arm(long *bits, __int64 *bits64, long bitidx) {
sink = _bittest(bits, bitidx);
sink = _bittestandcomplement(bits, bitidx);
sink = _bittestandreset(bits, bitidx);
sink = _bittestandset(bits, bitidx);
sink = _interlockedbittestandreset(bits, bitidx);
sink = _interlockedbittestandset(bits, bitidx);
sink = _bittest64(bits64, bitidx);
sink = _bittestandcomplement64(bits64, bitidx);
sink = _bittestandreset64(bits64, bitidx);
sink = _bittestandset64(bits64, bitidx);
sink = _interlockedbittestandreset64(bits64, bitidx);
sink = _interlockedbittestandset64(bits64, bitidx);
sink = _interlockedbittestandreset_acq(bits, bitidx);
sink = _interlockedbittestandreset_rel(bits, bitidx);
sink = _interlockedbittestandreset_nf(bits, bitidx);
sink = _interlockedbittestandset_acq(bits, bitidx);
sink = _interlockedbittestandset_rel(bits, bitidx);
sink = _interlockedbittestandset_nf(bits, bitidx);
}
#endif