[llvm] Fix comment typos (NFC)

Kazu Hirata 2022-08-07 00:16:14 -07:00
parent af2d2d7759
commit a2d4501718
39 changed files with 46 additions and 46 deletions

View File

@@ -834,8 +834,8 @@ public:
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
EVT VT) const;
-/// Return the ValueType for comparison libcalls. Comparions libcalls include
-/// floating point comparion calls, and Ordered/Unordered check calls on
+/// Return the ValueType for comparison libcalls. Comparison libcalls include
+/// floating point comparison calls, and Ordered/Unordered check calls on
/// floating point numbers.
virtual
MVT::SimpleValueType getCmpLibcallReturnType() const;

View File

@@ -2660,7 +2660,7 @@ static Constant *computePointerICmp(CmpInst::Predicate Pred, Value *LHS,
default:
return nullptr;
-// Equality comaprisons are easy to fold.
+// Equality comparisons are easy to fold.
case CmpInst::ICMP_EQ:
case CmpInst::ICMP_NE:
break;

View File

@@ -2780,7 +2780,7 @@ Error BitcodeReader::resolveGlobalAndIndirectSymbolInits() {
} else if (auto *GI = dyn_cast<GlobalIFunc>(GV)) {
Type *ResolverFTy =
GlobalIFunc::getResolverFunctionType(GI->getValueType());
-// Transparently fix up the type for compatiblity with older bitcode
+// Transparently fix up the type for compatibility with older bitcode
GI->setResolver(
ConstantExpr::getBitCast(C, ResolverFTy->getPointerTo()));
} else {

View File

@@ -104,7 +104,7 @@ class HoistSpillHelper : private LiveRangeEdit::Delegate {
// Map from pair of (StackSlot and Original VNI) to a set of spills which
// have the same stackslot and have equal values defined by Original VNI.
-// These spills are mergeable and are hoist candiates.
+// These spills are mergeable and are hoist candidates.
using MergeableSpillsMap =
MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
MergeableSpillsMap MergeableSpills;

View File

@@ -1931,7 +1931,7 @@ void InstrRefBasedLDV::produceMLocTransferFunction(
Result.first->second = P;
}
-// Accumulate any bitmask operands into the clobberred reg mask for this
+// Accumulate any bitmask operands into the clobbered reg mask for this
// block.
for (auto &P : MTracker->Masks) {
BlockMasks[CurBB].clearBitsNotInMask(P.first->getRegMask(), BVWords);

View File

@@ -397,7 +397,7 @@ void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
}
}
-// Use the target specific return value for comparions lib calls.
+// Use the target specific return value for comparison lib calls.
EVT RetVT = getCmpLibcallReturnType();
SDValue Ops[2] = {NewLHS, NewRHS};
TargetLowering::MakeLibCallOptions CallOptions;

View File

@@ -582,7 +582,7 @@ int ConvergingVLIWScheduler::pressureChange(const SUnit *SU, bool isBotUp) {
for (const auto &P : PD) {
if (!P.isValid())
continue;
-// The pressure differences are computed bottom-up, so the comparision for
+// The pressure differences are computed bottom-up, so the comparison for
// an increase is positive in the bottom direction, but negative in the
// top-down direction.
if (HighPressureSets[P.getPSet()])

View File

@@ -2427,7 +2427,7 @@ void ExecutionSession::OL_applyQueryPhase1(
// Add any non-candidates from the last JITDylib (if any) back on to the
// list of definition candidates for this JITDylib, reset definition
-// non-candiates to the empty set.
+// non-candidates to the empty set.
SymbolLookupSet Tmp;
std::swap(IPLS->DefGeneratorNonCandidates, Tmp);
IPLS->DefGeneratorCandidates.append(std::move(Tmp));

View File

@@ -20,7 +20,7 @@ using namespace llvm;
namespace {
-/// An example GC which attempts to be compatibile with Erlang/OTP garbage
+/// An example GC which attempts to be compatible with Erlang/OTP garbage
/// collector.
///
/// The frametable emitter is in ErlangGCPrinter.cpp.

View File

@@ -716,7 +716,7 @@ bool AsmLexer::isAtStartOfComment(const char *Ptr) {
if (CommentString.size() == 1)
return CommentString[0] == Ptr[0];
-// Allow # preprocessor commments also be counted as comments for "##" cases
+// Allow # preprocessor comments also be counted as comments for "##" cases
if (CommentString[1] == '#')
return CommentString[0] == Ptr[0];

View File

@@ -1777,7 +1777,7 @@ static bool canCmpInstrBeRemoved(MachineInstr &MI, MachineInstr &CmpInstr,
return true;
}
-/// Remove comparision in csinc-cmp sequence
+/// Remove comparison in csinc-cmp sequence
///
/// Examples:
/// 1. \code

View File

@@ -9016,7 +9016,7 @@ static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG,
// Extract the vector elements from Op1 and Op2 one by one and truncate them
// to be the right size for the destination. For example, if Op1 is v4i1
-// then the promoted vector is v4i32. The result of concatentation gives a
+// then the promoted vector is v4i32. The result of concatenation gives a
// v8i1, which when promoted is v8i16. That means each i32 element from Op1
// needs truncating to i16 and inserting in the result.
EVT ConcatVT = MVT::getVectorVT(ElType, NumElts);

View File

@@ -302,7 +302,7 @@ class ARMAsmParser : public MCTargetAsmParser {
ITInst.addOperand(MCOperand::createImm(ITState.Mask));
Out.emitInstruction(ITInst, getSTI());
-// Emit the conditonal instructions
+// Emit the conditional instructions
assert(PendingConditionalInsts.size() <= 4);
for (const MCInst &Inst : PendingConditionalInsts) {
Out.emitInstruction(Inst, getSTI());

View File

@@ -354,7 +354,7 @@ bool HexagonDAGToDAGISel::SelectBrevLdIntrinsic(SDNode *IntN) {
return false;
}
-/// Generate a machine instruction node for the new circlar buffer intrinsics.
+/// Generate a machine instruction node for the new circular buffer intrinsics.
/// The new versions use a CSx register instead of the K field.
bool HexagonDAGToDAGISel::SelectNewCircIntrinsic(SDNode *IntN) {
if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)

View File

@@ -2127,7 +2127,7 @@ bool HexagonInstrInfo::isComplex(const MachineInstr &MI) const {
!isMemOp(MI) && !MI.isBranch() && !MI.isReturn() && !MI.isCall();
}
-// Return true if the instruction is a compund branch instruction.
+// Return true if the instruction is a compound branch instruction.
bool HexagonInstrInfo::isCompoundBranchInstr(const MachineInstr &MI) const {
return getType(MI) == HexagonII::TypeCJ && MI.isBranch();
}

View File

@@ -1379,7 +1379,7 @@ bool PPCMIPeephole::eliminateRedundantCompare() {
bool IsPartiallyRedundant = (MBBtoMoveCmp != nullptr);
// We cannot optimize an unsupported compare opcode or
-// a mix of 32-bit and 64-bit comaprisons
+// a mix of 32-bit and 64-bit comparisons
if (!isSupportedCmpOp(CMPI1->getOpcode()) ||
!isSupportedCmpOp(CMPI2->getOpcode()) ||
is64bitCmpOp(CMPI1->getOpcode()) != is64bitCmpOp(CMPI2->getOpcode()))

View File

@@ -291,7 +291,7 @@ static bool hasPCRelativeForm(MachineInstr &Use) {
!BBI->modifiesRegister(Pair.DefReg, TRI))
continue;
-// The use needs to be used in the address compuation and not
+// The use needs to be used in the address computation and not
// as the register being stored for a store.
const MachineOperand *UseOp =
hasPCRelativeForm(*BBI) ? &BBI->getOperand(2) : nullptr;

View File

@@ -3259,7 +3259,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
MVT VT = Op.getSimpleValueType();
SDLoc DL(Op);
if (Subtarget.hasStdExtZbp()) {
-// Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
+// Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
// Start with the maximum immediate value which is the bitwidth - 1.
unsigned Imm = VT.getSizeInBits() - 1;
// If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.

View File

@@ -102,7 +102,7 @@ SDValue SystemZSelectionDAGInfo::EmitTargetCodeForMemset(
if (CByte) {
// Handle cases that can be done using at most two of
// MVI, MVHI, MVHHI and MVGHI. The latter two can only be
-// used if ByteVal is all zeros or all ones; in other casees,
+// used if ByteVal is all zeros or all ones; in other cases,
// we can move at most 2 halfwords.
uint64_t ByteVal = CByte->getZExtValue();
if (ByteVal == 0 || ByteVal == 255 ?

View File

@@ -68,7 +68,7 @@ InstructionCost WebAssemblyTTIImpl::getArithmeticInstrCost(
case Instruction::Shl:
// SIMD128's shifts currently only accept a scalar shift count. For each
// element, we'll need to extract, op, insert. The following is a rough
-// approxmation.
+// approximation.
if (Opd2Info != TTI::OK_UniformValue &&
Opd2Info != TTI::OK_UniformConstantValue)
Cost =

View File

@@ -5241,7 +5241,7 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
-// Multiply is commmutative.
+// Multiply is commutative.
if (!foldedLoad) {
foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
if (foldedLoad)

View File

@@ -1401,7 +1401,7 @@ void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
if (MinSize == 2 && Subtarget->is32Bit() &&
Subtarget->isTargetWindowsMSVC() &&
(Subtarget->getCPU().empty() || Subtarget->getCPU() == "pentium3")) {
-// For compatibilty reasons, when targetting MSVC, is is important to
+// For compatibility reasons, when targetting MSVC, is is important to
// generate a 'legacy' NOP in the form of a 8B FF MOV EDI, EDI. Some tools
// rely specifically on this pattern to be able to patch a function.
// This is only for 32-bit targets, when using /arch:IA32 or /arch:SSE.

View File

@@ -4247,7 +4247,7 @@ X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
if (!ST->hasAVX512())
return Cost + LT.first * (IsLoad ? 2 : 8);
-// AVX-512 masked load/store is cheapper
+// AVX-512 masked load/store is cheaper
return Cost + LT.first;
}

View File

@@ -4296,7 +4296,7 @@ struct AADereferenceableFloating : AADereferenceableImpl {
} else if (OffsetSExt > 0) {
// If something was stripped but there is circular reasoning we look
// for the offset. If it is positive we basically decrease the
-// dereferenceable bytes in a circluar loop now, which will simply
+// dereferenceable bytes in a circular loop now, which will simply
// drive them down to the known value in a very slow way which we
// can accelerate.
T.indicatePessimisticFixpoint();
@@ -5447,7 +5447,7 @@ struct AAValueSimplifyImpl : AAValueSimplify {
return nullptr;
}
-/// Helper function for querying AAValueSimplify and updating candicate.
+/// Helper function for querying AAValueSimplify and updating candidate.
/// \param IRP The value position we are trying to unify with SimplifiedValue
bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
const IRPosition &IRP, bool Simplify = true) {
@@ -5586,7 +5586,7 @@ struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
if (!askSimplifiedValueForOtherAAs(A))
return indicatePessimisticFixpoint();
-// If a candicate was found in this update, return CHANGED.
+// If a candidate was found in this update, return CHANGED.
return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
: ChangeStatus ::CHANGED;
}
@@ -5625,7 +5625,7 @@ struct AAValueSimplifyReturned : AAValueSimplifyImpl {
if (!askSimplifiedValueForOtherAAs(A))
return indicatePessimisticFixpoint();
-// If a candicate was found in this update, return CHANGED.
+// If a candidate was found in this update, return CHANGED.
return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
: ChangeStatus ::CHANGED;
}
@@ -5662,7 +5662,7 @@ struct AAValueSimplifyFloating : AAValueSimplifyImpl {
if (!askSimplifiedValueForOtherAAs(A))
return indicatePessimisticFixpoint();
-// If a candicate was found in this update, return CHANGED.
+// If a candidate was found in this update, return CHANGED.
return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
: ChangeStatus ::CHANGED;
}

View File

@@ -1983,7 +1983,7 @@ bool CHR::run() {
findScopes(AllScopes);
CHR_DEBUG(dumpScopes(AllScopes, "All scopes"));
-// Split the scopes if 1) the conditiona values of the biased
+// Split the scopes if 1) the conditional values of the biased
// branches/selects of the inner/lower scope can't be hoisted up to the
// outermost/uppermost scope entry, or 2) the condition values of the biased
// branches/selects in a scope (including subscopes) don't share at least

View File

@@ -1464,7 +1464,7 @@ bool DataFlowSanitizer::runImpl(Module &M) {
// br i1 icmp ne (i8 (i8)* @my_func, i8 (i8)* null), label %use_my_func,
// label %avoid_my_func
// The @"dfsw$my_func" wrapper is never null, so if we replace this use
-// in the comparision, the icmp will simplify to false and we have
+// in the comparison, the icmp will simplify to false and we have
// accidentially optimized away a null check that is necessary.
// This can lead to a crash when the null extern_weak my_func is called.
//

View File

@@ -540,7 +540,7 @@ checkOuterLoopInsts(FlattenInfo &FI,
// they make a net difference of zero.
if (IterationInstructions.count(&I))
continue;
-// The uncoditional branch to the inner loop's header will turn into
+// The unconditional branch to the inner loop's header will turn into
// a fall-through, so adds no cost.
BranchInst *Br = dyn_cast<BranchInst>(&I);
if (Br && Br->isUnconditional() &&

View File

@@ -699,7 +699,7 @@ private:
/// stating whether or not the two candidates are known at compile time to
/// have the same TripCount. The second is the difference in the two
/// TripCounts. This information can be used later to determine whether or not
-/// peeling can be performed on either one of the candiates.
+/// peeling can be performed on either one of the candidates.
std::pair<bool, Optional<unsigned>>
haveIdenticalTripCounts(const FusionCandidate &FC0,
const FusionCandidate &FC1) const {

View File

@@ -146,7 +146,7 @@ static cl::opt<bool> EnablePhiElim(
"enable-lsr-phielim", cl::Hidden, cl::init(true),
cl::desc("Enable LSR phi elimination"));
-// The flag adds instruction count to solutions cost comparision.
+// The flag adds instruction count to solutions cost comparison.
static cl::opt<bool> InsnsCost(
"lsr-insns-cost", cl::Hidden, cl::init(true),
cl::desc("Add instruction count to a LSR cost model"));

View File

@@ -16,7 +16,7 @@
// @a = alias i8, i8 *@g <-- @a is now an alias to base object @g
// @b = alias i8, i8 *@g
//
-// Eventually this file will implement full alias canonicalation, so that
+// Eventually this file will implement full alias canonicalization, so that
// all aliasees are private anonymous values. E.g.
// @a = alias i8, i8 *@g
// @g = global i8 0

View File

@@ -778,7 +778,7 @@ private:
/// Merge two chains of blocks respecting a given merge 'type' and 'offset'.
///
-/// If MergeType == 0, then the result is a concatentation of two chains.
+/// If MergeType == 0, then the result is a concatenation of two chains.
/// Otherwise, the first chain is cut into two sub-chains at the offset,
/// and merged using all possible ways of concatenating three chains.
MergedChain mergeBlocks(const std::vector<Block *> &X,

View File

@@ -1508,7 +1508,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
// In canonical mode we compute the addrec as an expression of a canonical IV
// using evaluateAtIteration and expand the resulting SCEV expression. This
-// way we avoid introducing new IVs to carry on the comutation of the addrec
+// way we avoid introducing new IVs to carry on the computation of the addrec
// throughout the loop.
//
// For nested addrecs evaluateAtIteration might need a canonical IV of a
@@ -2191,7 +2191,7 @@ template<typename T> static InstructionCost costAndCollectOperands(
}
case scAddRecExpr: {
// In this polynominal, we may have some zero operands, and we shouldn't
-// really charge for those. So how many non-zero coeffients are there?
+// really charge for those. So how many non-zero coefficients are there?
int NumTerms = llvm::count_if(S->operands(), [](const SCEV *Op) {
return !Op->isZero();
});
@@ -2200,7 +2200,7 @@ template<typename T> static InstructionCost costAndCollectOperands(
assert(!(*std::prev(S->operands().end()))->isZero() &&
"Last operand should not be zero");
-// Ignoring constant term (operand 0), how many of the coeffients are u> 1?
+// Ignoring constant term (operand 0), how many of the coefficients are u> 1?
int NumNonZeroDegreeNonOneTerms =
llvm::count_if(S->operands(), [](const SCEV *Op) {
auto *SConst = dyn_cast<SCEVConstant>(Op);

View File

@@ -7016,7 +7016,7 @@ static bool removeUndefIntroducingPredecessor(BasicBlock *BB,
IRBuilder<> Builder(T);
if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
BB->removePredecessor(Predecessor);
-// Turn uncoditional branches into unreachables and remove the dead
+// Turn unconditional branches into unreachables and remove the dead
// destination from conditional branches.
if (BI->isUnconditional())
Builder.CreateUnreachable();

View File

@@ -3063,7 +3063,7 @@ void InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
// 1) If we know that we must execute the scalar epilogue, emit an
// unconditional branch.
// 2) Otherwise, we must have a single unique exit block (due to how we
-// implement the multiple exit case). In this case, set up a conditonal
+// implement the multiple exit case). In this case, set up a conditional
// branch from the middle block to the loop scalar preheader, and the
// exit block. completeLoopSkeleton will update the condition to use an
// iteration check, if required to decide whether to execute the remainder.

View File

@@ -119,7 +119,7 @@ static inline void printCallStack(const SmallVectorImpl<uint64_t> &CallStack) {
// only changes the leaf of frame stack. \fn isEqual is a virtual function,
// which will have perf overhead. In the future, if we redesign a better hash
// function, then we can just skip this or switch to non-virtual function(like
-// just ignore comparision if hash conflicts probabilities is low)
+// just ignore comparison if hash conflicts probabilities is low)
template <class T> class Hashable {
public:
std::shared_ptr<T> Data;

View File

@@ -398,7 +398,7 @@ static void shuffleValueUseLists(Value *V, std::minstd_rand0 &Gen,
return;
// Generate random numbers between 10 and 99, which will line up nicely in
-// debug output. We're not worried about collisons here.
+// debug output. We're not worried about collisions here.
LLVM_DEBUG(dbgs() << "V = "; V->dump());
std::uniform_int_distribution<short> Dist(10, 99);
SmallDenseMap<const Use *, short, 16> Order;

View File

@@ -2254,7 +2254,7 @@ populateInstruction(CodeGenTarget &Target, const Record &EncodingDef,
// fieldFromInstruction().
// On Windows we make sure that this function is not inlined when
// using the VS compiler. It has a bug which causes the function
-// to be optimized out in some circustances. See llvm.org/pr38292
+// to be optimized out in some circumstances. See llvm.org/pr38292
static void emitFieldFromInstruction(formatted_raw_ostream &OS) {
OS << "// Helper functions for extracting fields from encoded instructions.\n"
<< "// InsnType must either be integral or an APInt-like object that "

View File

@@ -247,7 +247,7 @@ public:
} else {
// When there is no value (that's most intermediate nodes)
// Dispense of the 3 values bytes, and only store
-// 1 byte to track whether the node has sibling and chidren
+// 1 byte to track whether the node has sibling and children
// + 2 bytes for the index of the first children if necessary.
// That index also uses bytes 0-6 of the previous byte.
uint8_t Byte =

View File

@@ -313,7 +313,7 @@ class ReleaseWorkflow:
def create_pull_request(self, owner:str, repo_name:str, branch:str) -> bool:
"""
reate a pull request in `self.branch_repo_name`. The base branch of the
-pull request will be choosen based on the the milestone attached to
+pull request will be chosen based on the the milestone attached to
the issue represented by `self.issue_number` For example if the milestone
is Release 13.0.1, then the base branch will be release/13.x. `branch`
will be used as the compare branch.