//===- RISCVTargetTransformInfo.h - RISC-V specific TTI ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the RISC-V target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H

#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Function.h"
#include <optional>

namespace llvm {

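// A typical way this class reaches the optimizer is through the target
// machine's TTI hook (a sketch of the usual LLVM wiring; the exact signature
// and definition live in RISCVTargetMachine.cpp):
//
//   TargetTransformInfo
//   RISCVTargetMachine::getTargetTransformInfo(const Function &F) {
//     return TargetTransformInfo(RISCVTTIImpl(this, F));
//   }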
class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
  using BaseT = BasicTTIImplBase<RISCVTTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  const RISCVSubtarget *ST;
  const RISCVTargetLowering *TLI;

  const RISCVSubtarget *getST() const { return ST; }
  const RISCVTargetLowering *getTLI() const { return TLI; }

  /// Return the cost of LMUL. The larger the LMUL, the higher the cost.
  /// FIXME: Currently a placeholder that returns a unit cost for every type.
  InstructionCost getLMULCost(MVT VT) { return 1; }

public:
  explicit RISCVTTIImpl(const RISCVTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// Return the cost of materializing an immediate for a value operand of
  /// a store instruction.
  InstructionCost getStoreImmCost(Type *VecTy, TTI::OperandValueInfo OpInfo,
                                  TTI::TargetCostKind CostKind);

  bool hasBranchDivergence() { return ST->hasVInstructions(); }

  bool useGPUDivergenceAnalysis() { return ST->hasVInstructions(); }

  /// \returns true if the result of the value could potentially be
  /// different across workitems in a wavefront.
  bool isSourceOfDivergence(const Value *V) const;

  /// FIXME: Copied from AMDGPU; may be needed in Ventus.
  // bool isAlwaysUniform(const Value *V) const;

  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind);
  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr);
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind);

  TargetTransformInfo::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
    return TypeSize::getFixed(ST->getXLen());
  }

  unsigned getRegUsageForType(Type *Ty);

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE);

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);

  InstructionCost
  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
                  TTI::OperandValueInfo OpdInfo = {TTI::OK_AnyValue, TTI::OP_None},
                  const Instruction *I = nullptr);

  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                     CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I = nullptr);

  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);

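  /// Coarse register classes reported to the middle-end cost model: GPRRC for
  /// scalar integer registers, FPRRC for scalar floating-point registers, and
  /// VRRC for vector registers.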
  enum RISCVRegisterClass { GPRRC, FPRRC, VRRC };
  unsigned getNumberOfRegisters(unsigned ClassID) const {
    switch (ClassID) {
    case RISCVRegisterClass::GPRRC:
      // 63 = 64 GPR - x0 (zero register)
      // FIXME: Should we exclude fixed registers like SP, TP or GP?
      return 63;
    case RISCVRegisterClass::FPRRC:
      if (ST->hasStdExtF())
        return 32;
      return 0;
    case RISCVRegisterClass::VRRC:
      // v0 is special in that it is the only vector register that can be used
      // to hold a mask.
      // FIXME: Should we conservatively return 255 as the number of usable
      // vector registers?
      return ST->hasVInstructions() ? 256 : 0;
    }
    llvm_unreachable("unknown register class");
  }

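  /// Map an IR type onto one of the register classes above, based on its
  /// scalar element type and the floating-point extensions (Zfh/F/D) the
  /// subtarget provides.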
  unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const {
    if (Vector)
      return RISCVRegisterClass::VRRC;
    if (!Ty)
      return RISCVRegisterClass::GPRRC;

    Type *ScalarTy = Ty->getScalarType();
    if ((ScalarTy->isHalfTy() && ST->hasStdExtZfh()) ||
        (ScalarTy->isFloatTy() && ST->hasStdExtF()) ||
        (ScalarTy->isDoubleTy() && ST->hasStdExtD())) {
      return RISCVRegisterClass::FPRRC;
    }

    return RISCVRegisterClass::GPRRC;
  }

  const char *getRegisterClassName(unsigned ClassID) const {
    switch (ClassID) {
    case RISCVRegisterClass::GPRRC:
      return "RISCV::GPRRC";
    case RISCVRegisterClass::FPRRC:
      return "RISCV::FPRRC";
    case RISCVRegisterClass::VRRC:
      return "RISCV::VRRC";
    }
    llvm_unreachable("unknown register class");
  }
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H