[X86] Remove X86ProcFamilyEnum::IntelSLM

Replace the X86ProcFamilyEnum::IntelSLM enum with a TuningUseSLMArithCosts tuning flag, matching what we already do for Goldmont.

This just leaves X86ProcFamilyEnum::IntelAtom to be replaced with general Tuning/Feature flags, after which we can finally get rid of the old X86ProcFamilyEnum enum.
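
A minimal, self-contained C++ sketch of the pattern this change moves to: cost tables gated by a queryable tuning flag instead of a CPU-family enum check. X86SubtargetStub, lookupCost, getArithmeticCost, and the table entry below are simplified stand-ins for illustration, not the real LLVM APIs or cost values.

#include <cstddef>
#include <optional>

// Stand-in for the real X86Subtarget; TableGen generates the
// UseSLMArithCosts member from the "use-slm-arith-costs" feature string.
struct X86SubtargetStub {
  bool UseSLMArithCosts = false;
  bool useSLMArithCosts() const { return UseSLMArithCosts; }
};

struct CostTblEntry {
  int ISD;  // opcode key
  int Cost; // override cost
};

// Simplified stand-in for llvm::CostTableLookup: find a per-opcode override.
static std::optional<int> lookupCost(const CostTblEntry *Tbl, std::size_t N,
                                     int ISD) {
  for (std::size_t I = 0; I != N; ++I)
    if (Tbl[I].ISD == ISD)
      return Tbl[I].Cost;
  return std::nullopt;
}

// The query now keys off the tuning flag rather than a ST->isSLM() CPU-family
// check, so in principle any CPU can opt in to the Silvermont cost model
// (e.g. via -mattr=+use-slm-arith-costs).
int getArithmeticCost(const X86SubtargetStub &ST, int ISD, int GenericCost) {
  static const CostTblEntry SLMCostTbl[] = {
      {/*ISD=*/1, /*Cost=*/11}, // hypothetical entry
  };
  if (ST.useSLMArithCosts())
    if (std::optional<int> Cost = lookupCost(SLMCostTbl, /*N=*/1, ISD))
      return *Cost;
  return GenericCost;
}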

Differential Revision: https://reviews.llvm.org/D112079
Simon Pilgrim 2021-10-20 11:08:54 +01:00
parent 956df6fa62
commit 9fc523d114
4 changed files with 18 additions and 14 deletions

llvm/lib/Target/X86/X86.td

@@ -571,6 +571,10 @@ def TuningFastMOVBE
     : SubtargetFeature<"fast-movbe", "HasFastMOVBE", "true",
                        "Prefer a movbe over a single-use load + bswap / single-use bswap + store">;
 
+def TuningUseSLMArithCosts
+    : SubtargetFeature<"use-slm-arith-costs", "UseSLMArithCosts", "true",
+                       "Use Silvermont specific arithmetic costs">;
+
 def TuningUseGLMDivSqrtCosts
     : SubtargetFeature<"use-glm-div-sqrt-costs", "UseGLMDivSqrtCosts", "true",
                        "Use Goldmont specific floating point div/sqrt costs">;
@@ -586,8 +590,6 @@ def FeatureUseAA : SubtargetFeature<"use-aa", "UseAA", "true",
 // Bonnell
 def ProcIntelAtom : SubtargetFeature<"", "X86ProcFamily", "IntelAtom", "">;
-// Silvermont
-def ProcIntelSLM : SubtargetFeature<"", "X86ProcFamily", "IntelSLM", "">;
 
 //===----------------------------------------------------------------------===//
 // Register File Description
@@ -894,7 +896,7 @@ def ProcessorFeatures {
                                       FeaturePCLMUL,
                                       FeaturePRFCHW,
                                       FeatureRDRAND];
-  list<SubtargetFeature> SLMTuning = [ProcIntelSLM,
+  list<SubtargetFeature> SLMTuning = [TuningUseSLMArithCosts,
                                       TuningSlowTwoMemOps,
                                       TuningSlowLEA,
                                       TuningSlowIncDec,

llvm/lib/Target/X86/X86Subtarget.h

@@ -54,8 +54,7 @@ class X86Subtarget final : public X86GenSubtargetInfo {
   // are not a good idea. We should be migrating away from these.
   enum X86ProcFamilyEnum {
     Others,
-    IntelAtom,
-    IntelSLM
+    IntelAtom
   };
 
   enum X86SSEEnum {
@@ -506,6 +505,9 @@ class X86Subtarget final : public X86GenSubtargetInfo {
   /// Indicates target prefers AVX512 mask registers.
   bool PreferMaskRegisters = false;
 
+  /// Use Silvermont specific arithmetic costs.
+  bool UseSLMArithCosts = false;
+
   /// Use Goldmont specific floating point div/sqrt costs.
   bool UseGLMDivSqrtCosts = false;
@@ -797,6 +799,7 @@ public:
   }
 
   bool preferMaskRegisters() const { return PreferMaskRegisters; }
+  bool useSLMArithCosts() const { return UseSLMArithCosts; }
   bool useGLMDivSqrtCosts() const { return UseGLMDivSqrtCosts; }
   bool useLVIControlFlowIntegrity() const { return UseLVIControlFlowIntegrity; }
   bool allowTaggedGlobals() const { return AllowTaggedGlobals; }
@@ -833,7 +836,6 @@ public:
   /// TODO: to be removed later and replaced with suitable properties
   bool isAtom() const { return X86ProcFamily == IntelAtom; }
-  bool isSLM() const { return X86ProcFamily == IntelSLM; }
   bool useSoftFloat() const { return UseSoftFloat; }
 
   bool useAA() const override { return UseAA; }

llvm/lib/Target/X86/X86TargetTransformInfo.cpp

@@ -314,7 +314,7 @@ InstructionCost X86TTIImpl::getArithmeticInstrCost(
     { ISD::SUB, MVT::v2i64, 4 },
   };
 
-  if (ST->isSLM()) {
+  if (ST->useSLMArithCosts()) {
     if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
       // Check if the operands can be shrunk into a smaller datatype.
       // TODO: Merge this into generic vXi32 MUL patterns above.
@@ -2572,7 +2572,7 @@ InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
       { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps
     };
 
-    if (ST->isSLM())
+    if (ST->useSLMArithCosts())
       if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
         return LT.first * (ExtraCost + Entry->Cost);
@@ -3195,7 +3195,7 @@ X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
       if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
 
-    if (ST->isSLM())
+    if (ST->useSLMArithCosts())
       if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
@@ -3488,7 +3488,7 @@ InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
   int ISD = TLI->InstructionOpcodeToISD(Opcode);
   assert(ISD && "Unexpected vector opcode");
 
   MVT MScalarTy = LT.second.getScalarType();
-  if (ST->isSLM())
+  if (ST->useSLMArithCosts())
     if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
       return Entry->Cost + RegisterFileMoveCost;
@@ -3905,7 +3905,7 @@ X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
     EVT VT = TLI->getValueType(DL, ValTy);
     if (VT.isSimple()) {
       MVT MTy = VT.getSimpleVT();
-      if (ST->isSLM())
+      if (ST->useSLMArithCosts())
         if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
           return Entry->Cost;
@@ -3944,7 +3944,7 @@ X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
         ArithmeticCost *= LT.first - 1;
       }
 
-      if (ST->isSLM())
+      if (ST->useSLMArithCosts())
        if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
          return ArithmeticCost + Entry->Cost;

llvm/lib/Target/X86/X86TargetTransformInfo.h

@@ -80,6 +80,7 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
       X86::TuningSlowUAMem16,
       X86::TuningPreferMaskRegisters,
       X86::TuningInsertVZEROUPPER,
+      X86::TuningUseSLMArithCosts,
       X86::TuningUseGLMDivSqrtCosts,
 
       // Perf-tuning flags.
@@ -91,8 +92,7 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
       X86::TuningPrefer256Bit,
 
       // CPU name enums. These just follow CPU string.
-      X86::ProcIntelAtom,
-      X86::ProcIntelSLM,
+      X86::ProcIntelAtom
   };
 
 public: