[Transforms] Fix comment typos (NFC)
parent d3651aa697
commit 0e37ef0186
@@ -2694,7 +2694,7 @@ void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
 }
 
 // Later code makes structural assumptions about single predecessors phis e.g
-// that they are not live accross a suspend point.
+// that they are not live across a suspend point.
 cleanupSinglePredPHIs(F);
 
 // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
@@ -3298,7 +3298,7 @@ static bool runAttributorOnFunctions(InformationCache &InfoCache,
 // Internalize non-exact functions
 // TODO: for now we eagerly internalize functions without calculating the
 // cost, we need a cost interface to determine whether internalizing
-// a function is "benefitial"
+// a function is "beneficial"
 if (AllowDeepWrapper) {
 unsigned FunSize = Functions.size();
 for (unsigned u = 0; u < FunSize; u++) {
@@ -707,7 +707,7 @@ struct State;
 } // namespace PointerInfo
 } // namespace AA
 
-/// Helper for AA::PointerInfo::Acccess DenseMap/Set usage.
+/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
 template <>
 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
 using Access = AAPointerInfo::Access;
@@ -722,7 +722,7 @@ template <>
 struct DenseMapInfo<AAPointerInfo ::OffsetAndSize>
 : DenseMapInfo<std::pair<int64_t, int64_t>> {};
 
-/// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign
+/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everythign
 /// but the instruction
 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
 using Base = DenseMapInfo<Instruction *>;
@@ -7771,7 +7771,7 @@ void AAMemoryLocationImpl::categorizePtrValue(
 // on the call edge, though, we should. To make that happen we need to
 // teach various passes, e.g., DSE, about the copy effect of a byval. That
 // would also allow us to mark functions only accessing byval arguments as
-// readnone again, atguably their acceses have no effect outside of the
+// readnone again, arguably their accesses have no effect outside of the
 // function, like accesses to allocas.
 MLK = NO_ARGUMENT_MEM;
 } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
@@ -1210,7 +1210,7 @@ static Optional<unsigned> getGVNForPHINode(OutlinableRegion &Region,
 // the hash for the PHINode.
 OGVN = Cand.getGVN(IncomingBlock);
 
-// If there is no number for the incoming block, it is becaause we have
+// If there is no number for the incoming block, it is because we have
 // split the candidate basic blocks. So we use the previous block that it
 // was split from to find the valid global value numbering for the PHINode.
 if (!OGVN) {
@@ -753,7 +753,7 @@ BranchProbability PartialInlinerImpl::getOutliningCallBBRelativeFreq(
 // is predicted to be less likely, the predicted probablity is usually
 // higher than the actual. For instance, the actual probability of the
 // less likely target is only 5%, but the guessed probablity can be
-// 40%. In the latter case, there is no need for further adjustement.
+// 40%. In the latter case, there is no need for further adjustment.
 // FIXME: add an option for this.
 if (OutlineRegionRelFreq < BranchProbability(45, 100))
 return OutlineRegionRelFreq;
@@ -1241,7 +1241,7 @@ bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
 }
 
 bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
-// Instrument acesses from different address spaces only for AMDGPU.
+// Instrument accesses from different address spaces only for AMDGPU.
 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
 if (PtrTy->getPointerAddressSpace() != 0 &&
 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr)))
@@ -1146,7 +1146,7 @@ void DataFlowSanitizer::buildExternWeakCheckIfNeeded(IRBuilder<> &IRB,
 // but replacing with a known-to-not-be-null wrapper can break this check.
 // When replacing uses of the extern weak function with the wrapper we try
 // to avoid replacing uses in conditionals, but this is not perfect.
-// In the case where we fail, and accidentially optimize out a null check
+// In the case where we fail, and accidentally optimize out a null check
 // for a extern weak function, add a check here to help identify the issue.
 if (GlobalValue::isExternalWeakLinkage(F->getLinkage())) {
 std::vector<Value *> Args;
@@ -1465,7 +1465,7 @@ bool DataFlowSanitizer::runImpl(Module &M) {
 // label %avoid_my_func
 // The @"dfsw$my_func" wrapper is never null, so if we replace this use
 // in the comparison, the icmp will simplify to false and we have
-// accidentially optimized away a null check that is necessary.
+// accidentally optimized away a null check that is necessary.
 // This can lead to a crash when the null extern_weak my_func is called.
 //
 // To prevent (the most common pattern of) this problem,
@@ -708,7 +708,7 @@ Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
 }
 
 bool HWAddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
-// Do not instrument acesses from different address spaces; we cannot deal
+// Do not instrument accesses from different address spaces; we cannot deal
 // with them.
 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
 if (PtrTy->getPointerAddressSpace() != 0)
@@ -385,7 +385,7 @@ MemProfiler::isInterestingMemoryAccess(Instruction *I) const {
 if (!Access.Addr)
 return None;
 
-// Do not instrument acesses from different address spaces; we cannot deal
+// Do not instrument accesses from different address spaces; we cannot deal
 // with them.
 Type *PtrTy = cast<PointerType>(Access.Addr->getType()->getScalarType());
 if (PtrTy->getPointerAddressSpace() != 0)
@@ -379,7 +379,7 @@ static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
 return false;
 }
 
-// Do not instrument acesses from different address spaces; we cannot deal
+// Do not instrument accesses from different address spaces; we cannot deal
 // with them.
 if (Addr) {
 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
@@ -729,7 +729,7 @@ static bool narrowSDivOrSRem(BinaryOperator *Instr, LazyValueInfo *LVI) {
 // operands.
 unsigned OrigWidth = Instr->getType()->getIntegerBitWidth();
 
-// What is the smallest bit width that can accomodate the entire value ranges
+// What is the smallest bit width that can accommodate the entire value ranges
 // of both of the operands?
 std::array<Optional<ConstantRange>, 2> CRs;
 unsigned MinSignedBits = 0;
@@ -781,7 +781,7 @@ static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
 // Find the smallest power of two bitwidth that's sufficient to hold Instr's
 // operands.
 
-// What is the smallest bit width that can accomodate the entire value ranges
+// What is the smallest bit width that can accommodate the entire value ranges
 // of both of the operands?
 unsigned MaxActiveBits = 0;
 for (Value *Operand : Instr->operands()) {
@@ -471,7 +471,7 @@ uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
 }
 
 if (local_dep.isDef()) {
-// For masked load/store intrinsics, the local_dep may actully be
+// For masked load/store intrinsics, the local_dep may actually be
 // a normal load or store instruction.
 CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());
 
@@ -1988,7 +1988,7 @@ bool llvm::promoteLoopAccessesToScalars(
 IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
 }
 
-// Check that all accesses to pointers in the aliass set use the same type.
+// Check that all accesses to pointers in the alias set use the same type.
 // We cannot (yet) promote a memory location that is loaded and stored in
 // different sizes. While we are at it, collect alignment and AA info.
 Type *AccessTy = nullptr;
@@ -139,7 +139,7 @@ struct FlattenInfo {
 
 PHINode *NarrowInnerInductionPHI = nullptr; // Holds the old/narrow induction
 PHINode *NarrowOuterInductionPHI = nullptr; // phis, i.e. the Phis before IV
-// has been apllied. Used to skip
+// has been applied. Used to skip
 // checks on phi nodes.
 
 FlattenInfo(Loop *OL, Loop *IL) : OuterLoop(OL), InnerLoop(IL){};
@@ -428,7 +428,7 @@ using LoopVector = SmallVector<Loop *, 4>;
 // order. Thus, if FC0 comes *before* FC1 in a FusionCandidateSet, then FC0
 // dominates FC1 and FC1 post-dominates FC0.
 // std::set was chosen because we want a sorted data structure with stable
-// iterators. A subsequent patch to loop fusion will enable fusing non-ajdacent
+// iterators. A subsequent patch to loop fusion will enable fusing non-adjacent
 // loops by moving intervening code around. When this intervening code contains
 // loops, those loops will be moved also. The corresponding FusionCandidates
 // will also need to be moved accordingly. As this is done, having stable
@@ -474,7 +474,7 @@ private:
 NumLoopBlocksDeleted += DeadLoopBlocks.size();
 }
 
-/// Constant-fold terminators of blocks acculumated in FoldCandidates into the
+/// Constant-fold terminators of blocks accumulated in FoldCandidates into the
 /// unconditional branches.
 void foldTerminators() {
 for (BasicBlock *BB : FoldCandidates) {
@@ -300,8 +300,8 @@ static bool sinkLoopInvariantInstructions(Loop &L, AAResults &AA, LoopInfo &LI,
 return BFI.getBlockFreq(A) < BFI.getBlockFreq(B);
 });
 
-// Traverse preheader's instructions in reverse order becaue if A depends
-// on B (A appears after B), A needs to be sinked first before B can be
+// Traverse preheader's instructions in reverse order because if A depends
+// on B (A appears after B), A needs to be sunk first before B can be
 // sinked.
 for (Instruction &I : llvm::make_early_inc_range(llvm::reverse(*Preheader))) {
 if (isa<PHINode>(&I))
@@ -1222,7 +1222,7 @@ static LoopUnrollResult tryToUnrollLoop(
 // Find the smallest exact trip count for any exit. This is an upper bound
 // on the loop trip count, but an exit at an earlier iteration is still
 // possible. An unroll by the smallest exact trip count guarantees that all
-// brnaches relating to at least one exit can be eliminated. This is unlike
+// branches relating to at least one exit can be eliminated. This is unlike
 // the max trip count, which only guarantees that the backedge can be broken.
 unsigned TripCount = 0;
 unsigned TripMultiple = 1;
@@ -331,7 +331,7 @@ static unsigned countToEliminateCompares(Loop &L, unsigned MaxPeelCount,
 
 /// This "heuristic" exactly matches implicit behavior which used to exist
 /// inside getLoopEstimatedTripCount. It was added here to keep an
-/// improvement inside that API from causing peeling to become more agressive.
+/// improvement inside that API from causing peeling to become more aggressive.
 /// This should probably be removed.
 static bool violatesLegacyMultiExitLoopCheck(Loop *L) {
 BasicBlock *Latch = L->getLoopLatch();
@@ -218,7 +218,7 @@ static void ConnectEpilog(Loop *L, Value *ModVal, BasicBlock *NewExit,
 for (PHINode &PN : NewExit->phis()) {
 // PN should be used in another PHI located in Exit block as
 // Exit was split by SplitBlockPredecessors into Exit and NewExit
-// Basicaly it should look like:
+// Basically it should look like:
 // NewExit:
 // PN = PHI [I, Latch]
 // ...
@@ -126,7 +126,7 @@ void llvm::createMemCpyLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr,
 Align PartSrcAlign(commonAlignment(SrcAlign, BytesCopied));
 Align PartDstAlign(commonAlignment(DstAlign, BytesCopied));
 
-// Calaculate the new index
+// Calculate the new index
 unsigned OperandSize = DL.getTypeStoreSize(OpTy);
 assert(
 (!AtomicElementSize || OperandSize % *AtomicElementSize == 0) &&
@@ -9394,7 +9394,7 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
 WorkList.push_back(DestBundle);
 };
 
-// Any instruction which isn't safe to speculate at the begining of the
+// Any instruction which isn't safe to speculate at the beginning of the
 // block is control dependend on any early exit or non-willreturn call
 // which proceeds it.
 if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) {
@@ -243,7 +243,7 @@ void PlainCFGBuilder::createVPInstructionsForVPBB(VPBasicBlock *VPBB,
 for (Value *Op : Inst->operands())
 VPOperands.push_back(getOrCreateVPOperand(Op));
 
-// Build VPInstruction for any arbitraty Instruction without specific
+// Build VPInstruction for any arbitrary Instruction without specific
 // representation in VPlan.
 NewVPV = cast<VPInstruction>(
 VPIRBuilder.createNaryOp(Inst->getOpcode(), VPOperands, Inst));