diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp index 59369fba2e772..ad32e726c2e4c 100644 --- a/clang/lib/AST/Type.cpp +++ b/clang/lib/AST/Type.cpp @@ -4912,8 +4912,8 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const { QualType type = getCanonicalTypeInternal(); switch (type->getTypeClass()) { - // We'll only see canonical types here. #define NON_CANONICAL_TYPE(Class, Parent) \ + /* We'll only see canonical types here. */ \ case Type::Class: \ llvm_unreachable("non-canonical type"); #define TYPE(Class, Parent) diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp index b5a7217dd3e56..621a26b16b040 100644 --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -672,10 +672,10 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs, args.size()); - // If we don't have a prototype at all, but we're supposed to - // explicitly use the variadic convention for unprototyped calls, - // treat all of the arguments as required but preserve the nominal - // possibility of variadics. + // If we don't have a prototype at all, but we're supposed to + // explicitly use the variadic convention for unprototyped calls, + // treat all of the arguments as required but preserve the nominal + // possibility of variadics. } else if (CGM.getTargetCodeGenInfo().isNoProtoCallVariadic( args, cast<FunctionNoProtoType>(fnType))) { required = RequiredArgs(args.size()); @@ -4064,7 +4064,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI, if (results.size() == 1) { RV = results[0]; - // Otherwise, we need to make a first-class aggregate. + // Otherwise, we need to make a first-class aggregate. } else { // Construct a return type that lacks padding elements. 
llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType(); @@ -4203,11 +4203,11 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, if (type->isReferenceType()) { args.add(RValue::get(Builder.CreateLoad(local)), type); - // In ARC, move out of consumed arguments so that the release cleanup - // entered by StartFunction doesn't cause an over-release. This isn't - // optimal -O0 code generation, but it should get cleaned up when - // optimization is enabled. This also assumes that delegate calls are - // performed exactly once for a set of arguments, but that should be safe. + // In ARC, move out of consumed arguments so that the release cleanup + // entered by StartFunction doesn't cause an over-release. This isn't + // optimal -O0 code generation, but it should get cleaned up when + // optimization is enabled. This also assumes that delegate calls are + // performed exactly once for a set of arguments, but that should be safe. } else if (getLangOpts().ObjCAutoRefCount && param->hasAttr<NSConsumedAttr>() && type->isObjCRetainableType()) { llvm::Value *ptr = Builder.CreateLoad(local); @@ -4216,8 +4216,8 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, Builder.CreateStore(null, local); args.add(RValue::get(ptr), type); - // For the most part, we just need to load the alloca, except that - // aggregate r-values are actually pointers to temporaries. + // For the most part, we just need to load the alloca, except that + // aggregate r-values are actually pointers to temporaries. } else { args.add(convertTempToRValue(local, type, loc), type); } @@ -4309,7 +4309,7 @@ static void emitWriteback(CodeGenFunction &CGF, // Release the old value. CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); - // Otherwise, we can just do a normal lvalue store. + // Otherwise, we can just do a normal lvalue store. 
} else { CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); } @@ -4350,7 +4350,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { srcLV = CGF.EmitLValue(lvExpr); - // Otherwise, just emit it as a scalar. + // Otherwise, just emit it as a scalar. } else { Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());