diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h
index 526e2107afa042..0f6c660e698b09 100644
--- a/clang/include/clang/AST/Decl.h
+++ b/clang/include/clang/AST/Decl.h
@@ -65,6 +65,7 @@ class Module;
 class NamespaceDecl;
 class ParmVarDecl;
 class RecordDecl;
+class RecordFieldReorganizer;
 class Stmt;
 class StringLiteral;
 class TagDecl;
diff --git a/clang/include/clang/AST/DeclBase.h b/clang/include/clang/AST/DeclBase.h
index 9117f53487d65b..2cc1b483aaa344 100644
--- a/clang/include/clang/AST/DeclBase.h
+++ b/clang/include/clang/AST/DeclBase.h
@@ -1270,6 +1270,8 @@ class DeclContext {
   friend class ExternalASTSource;
   /// For CreateStoredDeclsMap
   friend class DependentDiagnostic;
+  /// For fine-grained control of field order
+  friend class RecordFieldReorganizer;
   /// For hasNeedToReconcileExternalVisibleStorage,
   /// hasLazyLocalLexicalLookups, hasLazyExternalLexicalLookups
   friend class ASTWriter;
diff --git a/clang/include/clang/AST/RandstructSeed.h b/clang/include/clang/AST/RandstructSeed.h
new file mode 100644
index 00000000000000..eefdd8916f4b6a
--- /dev/null
+++ b/clang/include/clang/AST/RandstructSeed.h
@@ -0,0 +1,8 @@
+#ifndef RANDSTRUCTSEED_H
+#define RANDSTRUCTSEED_H
+#include <string>
+namespace clang {
+extern std::string RandstructSeed;
+extern bool RandstructAutoSelect;
+}
+#endif
diff --git a/clang/include/clang/AST/RecordFieldReorganizer.h b/clang/include/clang/AST/RecordFieldReorganizer.h
new file mode 100644
index 00000000000000..7e7680b13eccea
--- /dev/null
+++ b/clang/include/clang/AST/RecordFieldReorganizer.h
@@ -0,0 +1,61 @@
+//===-- RecordFieldReorganizer.h - Interface for manipulating field order --*-
+// C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file contains the base class that defines an interface for
+// manipulating a RecordDecl's field layouts.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_AST_RECORDFIELDREORGANIZER_H
+#define LLVM_CLANG_LIB_AST_RECORDFIELDREORGANIZER_H
+
+#include "Decl.h"
+#include <random>
+
+namespace clang {
+
+// FIXME: Find a better alternative to SmallVector with hardcoded size!
+
+class RecordFieldReorganizer {
+public:
+  virtual ~RecordFieldReorganizer() = default;
+  void reorganizeFields(const ASTContext &C, const RecordDecl *D);
+
+protected:
+  virtual void reorganize(const ASTContext &C, const RecordDecl *D,
+                          SmallVector<Decl *, 64> &NewOrder) = 0;
+
+private:
+  void commit(const RecordDecl *D,
+              SmallVectorImpl<Decl *> &NewFieldOrder) const;
+};
+
+class Randstruct : public RecordFieldReorganizer {
+private:
+  std::seed_seq Seq;
+  std::default_random_engine rng;
+
+public:
+  /// Determines if the Record can be safely and easily randomized based on
+  /// certain criteria (see implementation).
+  Randstruct(std::string seed) : Seq(seed.begin(), seed.end()), rng(Seq) {}
+  static bool isTriviallyRandomizable(const RecordDecl *D);
+
+protected:
+  SmallVector<Decl *, 64> randomize(SmallVector<Decl *, 64> fields);
+  SmallVector<Decl *, 64> perfrandomize(const ASTContext &ctx,
+                                        SmallVector<Decl *, 64> fields);
+  virtual void reorganize(const ASTContext &C, const RecordDecl *D,
+                          SmallVector<Decl *, 64> &NewOrder) override;
+};
+
+} // namespace clang
+
+#endif
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index 849d2c228b3225..8099366d19a0b4 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -3201,3 +3201,10 @@ def ObjCExternallyRetained : InheritableAttr {
   let Subjects = SubjectList<[NonParmVar, Function, Block, ObjCMethod]>;
   let Documentation = [ObjCExternallyRetainedDocs];
 }
+
+def RandomizeLayout : InheritableAttr {
+  let Spellings = [GCC<"randomize_layout">, Declspec<"randomize_layout">,
+                   Keyword<"randomize_layout">];
+  let Subjects = SubjectList<[Record]>;
+  let Documentation = [ClangRandstructDocs];
+}
diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td
index dada1badcd3824..a6f109f41d6da6 100644
--- a/clang/include/clang/Basic/AttrDocs.td
+++ b/clang/include/clang/Basic/AttrDocs.td
@@ -4094,4 +4094,22 @@ Likewise, when applied to a strong local variable, that variable becomes
 ``const`` and is considered externally-retained.
 When compiled without ``-fobjc-arc``, this attribute is ignored.
-}]; }
+}];
+}
+
+def ClangRandstructDocs : Documentation {
+  let Category = DocCatVariable;
+  let Content = [{
+The attribute ``randomize_layout`` can be applied to the declaration of
+a record. ``randomize_layout`` instructs the compiler to randomize the memory layout
+of the member variables of the record.
+.. code-block:: c
+
+  // Indicates that this struct should be randomized by Randstruct implementation.
+ struct s { + char *a; + char *b; + char *c; + }__attribute__((randomize_layout)); +}]; +} diff --git a/clang/include/clang/Basic/DiagnosticASTKinds.td b/clang/include/clang/Basic/DiagnosticASTKinds.td index c2a390fa465d81..406325d3677a29 100644 --- a/clang/include/clang/Basic/DiagnosticASTKinds.td +++ b/clang/include/clang/Basic/DiagnosticASTKinds.td @@ -343,4 +343,6 @@ def warn_padded_struct_size : Warning< InGroup, DefaultIgnore; def warn_unnecessary_packed : Warning< "packed attribute is unnecessary for %0">, InGroup, DefaultIgnore; +def warn_randomize_attr_union : Warning< + "union declared with 'randomize_layout' attribute">, InGroup>; } diff --git a/clang/include/clang/Driver/CC1Options.td b/clang/include/clang/Driver/CC1Options.td index cead87201eee74..924122df92d60c 100644 --- a/clang/include/clang/Driver/CC1Options.td +++ b/clang/include/clang/Driver/CC1Options.td @@ -422,6 +422,8 @@ def fcaret_diagnostics_max_lines : HelpText<"Set the maximum number of source lines to show in a caret diagnostic">; def fmessage_length : Separate<["-"], "fmessage-length">, MetaVarName<"">, HelpText<"Format message diagnostics so that they fit within N columns or fewer, when possible.">; +def randstruct_seed : Separate<["-"], "randstruct-seed">, MetaVarName<"">, + HelpText<"Randomization seed for random struct layouts">; def verify_EQ : CommaJoined<["-"], "verify=">, MetaVarName<"">, HelpText<"Verify diagnostic output using comment directives that start with" diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 57050412ab5dc2..edf714adbf9360 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -1258,6 +1258,7 @@ def fmacro_backtrace_limit_EQ : Joined<["-"], "fmacro-backtrace-limit=">, def fmerge_all_constants : Flag<["-"], "fmerge-all-constants">, Group, Flags<[CC1Option, CoreOption]>, HelpText<"Allow merging of constants">; def fmessage_length_EQ : Joined<["-"], "fmessage-length=">, 
Group; +def randstruct_seed_EQ : Joined<["-"], "randstruct-seed=">, Group; def fms_extensions : Flag<["-"], "fms-extensions">, Group, Flags<[CC1Option, CoreOption]>, HelpText<"Accept some non-standard constructs supported by the Microsoft compiler">; def fms_compatibility : Flag<["-"], "fms-compatibility">, Group, Flags<[CC1Option, CoreOption]>, @@ -1750,6 +1751,9 @@ def freroll_loops : Flag<["-"], "freroll-loops">, Group, HelpText<"Turn on loop reroller">, Flags<[CC1Option]>; def fno_reroll_loops : Flag<["-"], "fno-reroll-loops">, Group, HelpText<"Turn off loop reroller">; +def randstruct_auto : Flag<["-"], "randstruct-auto">, + HelpText<"Enable automatic structure selection for field randomization; " + "Disable for specific structures with attribute no_randomize_layout">, Flags<[CC1Option]>; def ftrigraphs : Flag<["-"], "ftrigraphs">, Group, HelpText<"Process trigraph sequences">, Flags<[CC1Option]>; def fno_trigraphs : Flag<["-"], "fno-trigraphs">, Group, diff --git a/clang/lib/AST/CMakeLists.txt b/clang/lib/AST/CMakeLists.txt index 570ca718acf5dc..08acf687971957 100644 --- a/clang/lib/AST/CMakeLists.txt +++ b/clang/lib/AST/CMakeLists.txt @@ -44,6 +44,7 @@ add_clang_library(clangAST InheritViz.cpp ItaniumCXXABI.cpp ItaniumMangle.cpp + RecordFieldReorganizer.cpp Mangle.cpp MicrosoftCXXABI.cpp MicrosoftMangle.cpp diff --git a/clang/lib/AST/DeclBase.cpp b/clang/lib/AST/DeclBase.cpp index a44c83981586ec..5331fc657dc1bd 100644 --- a/clang/lib/AST/DeclBase.cpp +++ b/clang/lib/AST/DeclBase.cpp @@ -1257,6 +1257,9 @@ DeclContext::BuildDeclChain(ArrayRef Decls, PrevDecl = D; } + // The last one in the chain should have a null next! 
+  PrevDecl->NextInContextAndBits.setPointer(nullptr);
+
   return std::make_pair(FirstNewDecl, PrevDecl);
 }
 
diff --git a/clang/lib/AST/RecordFieldReorganizer.cpp b/clang/lib/AST/RecordFieldReorganizer.cpp
new file mode 100644
index 00000000000000..a4090908e4cadb
--- /dev/null
+++ b/clang/lib/AST/RecordFieldReorganizer.cpp
@@ -0,0 +1,263 @@
+//===----- RecordFieldReorganizer.cpp - Implementation for field reorder -*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains the implementation for RecordDecl field reordering.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/RecordFieldReorganizer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RandstructSeed.h"
+
+#include <algorithm>
+#include <random>
+#include <set>
+#include <tuple>
+#include <vector>
+
+// FIXME: Find a better alternative to SmallVector with hardcoded size!
+ +namespace clang { +std::string RandstructSeed = ""; +bool RandstructAutoSelect = false; + +void RecordFieldReorganizer::reorganizeFields(const ASTContext &C, + const RecordDecl *D) { + // Save original fields for asserting later that a subclass hasn't + // sabotaged the RecordDecl by removing or adding fields + std::set mutateGuard; + + SmallVector fields; + for (auto f : D->fields()) { + mutateGuard.insert(f); + fields.push_back(f); + } + // Now allow subclass implementations to reorder the fields + reorganize(C, D, fields); + + // Assert all fields are still present + assert(mutateGuard.size() == fields.size() && + "Field count altered after reorganization"); + for (auto f : fields) { + auto found = std::find(std::begin(mutateGuard), std::end(mutateGuard), f); + assert(found != std::end(mutateGuard) && + "Unknown field encountered after reorganization"); + } + + commit(D, fields); +} +void RecordFieldReorganizer::commit( + const RecordDecl *D, SmallVectorImpl &NewFieldOrder) const { + Decl *First, *Last; + std::tie(First, Last) = DeclContext::BuildDeclChain( + NewFieldOrder, D->hasLoadedFieldsFromExternalStorage()); + D->FirstDecl = First; + D->LastDecl = Last; +} + +/// Bucket to store fields up to size of a cache line during randomization. +class Bucket { +public: + virtual ~Bucket() = default; + /// Returns a randomized version of the bucket. + virtual SmallVector + randomize(std::default_random_engine rng); + /// Checks if an added element would fit in a cache line. + virtual bool canFit(size_t size) const; + /// Adds a field to the bucket. + void add(FieldDecl *field, size_t size); + /// Is this bucket for bitfields? + virtual bool isBitfieldRun() const; + /// Is this bucket full? + bool full() const; + bool empty() const; + +protected: + size_t size; + SmallVector fields; +}; + +/// BitfieldRun is a bucket for storing adjacent bitfields that may +/// exceed the size of a cache line. 
+class BitfieldRun : public Bucket { +public: + virtual SmallVector + randomize(std::default_random_engine rng) override; + virtual bool canFit(size_t size) const override; + virtual bool isBitfieldRun() const override; +}; + +// FIXME: Is there a way to detect this? (i.e. on 32bit system vs 64?) +const size_t CACHE_LINE = 64; + +SmallVector Bucket::randomize(std::default_random_engine rng) { + std::shuffle(std::begin(fields), std::end(fields), rng); + return fields; +} + +bool Bucket::canFit(size_t size) const { + // We will say we can fit any size if the bucket is empty + // because there are many instances where a field is much + // larger than 64 bits (i.e., an array, a structure, etc) + // but it still must be placed into a bucket. + // + // Otherwise, if the bucket has elements and we're still + // trying to create a cache-line sized grouping, we cannot + // fit a larger field in here. + return empty() || this->size + size <= CACHE_LINE; +} + +void Bucket::add(FieldDecl *field, size_t size) { + fields.push_back(field); + this->size += size; +} + +bool Bucket::isBitfieldRun() const { + // The normal bucket is not a bitfieldrun. This is to avoid RTTI. + return false; +} + +bool Bucket::full() const { + // We're full if our size is a cache line. + return size >= CACHE_LINE; +} + +bool Bucket::empty() const { return size == 0; } + +SmallVector +BitfieldRun::randomize(std::default_random_engine rng) { + // Keep bit fields adjacent, we will not scramble them. + return fields; +} + +bool BitfieldRun::canFit(size_t size) const { + // We can always fit another adjacent bitfield. + return true; +} + +bool BitfieldRun::isBitfieldRun() const { return true; } + +SmallVector Randstruct::randomize(SmallVector fields) { + std::shuffle(std::begin(fields), std::end(fields), rng); + return fields; +} + +SmallVector +Randstruct::perfrandomize(const ASTContext &ctx, + SmallVector fields) { + // All of the buckets produced by best-effort cache-line algorithm. 
+ std::vector> buckets; + + // The current bucket of fields that we are trying to fill to a cache-line. + std::unique_ptr currentBucket = nullptr; + // The current bucket containing the run of adjacent bitfields to ensure + // they remain adjacent. + std::unique_ptr currentBitfieldRun = nullptr; + + // Tracks the number of fields that we failed to fit to the current bucket, + // and thus still need to be added later. + size_t skipped = 0; + + while (!fields.empty()) { + // If we've skipped more fields than we have remaining to place, + // that means that they can't fit in our current bucket, and we + // need to start a new one. + if (skipped >= fields.size()) { + skipped = 0; + buckets.push_back(std::move(currentBucket)); + } + + // Take the first field that needs to be put in a bucket. + auto field = fields.begin(); + auto *f = llvm::cast(*field); + + if (f->isBitField()) { + // Start a bitfield run if this is the first bitfield + // we have found. + if (!currentBitfieldRun) { + currentBitfieldRun = llvm::make_unique(); + } + + // We've placed the field, and can remove it from the + // "awaiting buckets" vector called "fields" + currentBitfieldRun->add(f, 1); + fields.erase(field); + } else { + // Else, current field is not a bitfield + // If we were previously in a bitfield run, end it. + if (currentBitfieldRun) { + buckets.push_back(std::move(currentBitfieldRun)); + } + // If we don't have a bucket, make one. + if (!currentBucket) { + currentBucket = llvm::make_unique(); + } + + auto width = ctx.getTypeInfo(f->getType()).Width; + + // If we can fit, add it. + if (currentBucket->canFit(width)) { + currentBucket->add(f, width); + fields.erase(field); + + // If it's now full, tie off the bucket. + if (currentBucket->full()) { + skipped = 0; + buckets.push_back(std::move(currentBucket)); + } + } else { + // We can't fit it in our current bucket. + // Move to the end for processing later. + ++skipped; // Mark it skipped. 
+ fields.push_back(f); + fields.erase(field); + } + } + } + + // Done processing the fields awaiting a bucket. + + // If we were filling a bucket, tie it off. + if (currentBucket) { + buckets.push_back(std::move(currentBucket)); + } + + // If we were processing a bitfield run bucket, tie it off. + if (currentBitfieldRun) { + buckets.push_back(std::move(currentBitfieldRun)); + } + + std::shuffle(std::begin(buckets), std::end(buckets), rng); + + // Produce the new ordering of the elements from our buckets. + SmallVector finalOrder; + for (auto &bucket : buckets) { + auto randomized = bucket->randomize(rng); + finalOrder.insert(finalOrder.end(), randomized.begin(), randomized.end()); + } + + return finalOrder; +} + +void Randstruct::reorganize(const ASTContext &C, const RecordDecl *D, + SmallVector &NewOrder) { + SmallVector randomized = perfrandomize(C, NewOrder); + NewOrder = randomized; +} +bool Randstruct::isTriviallyRandomizable(const RecordDecl *D) { + for (auto f : D->fields()) { + // If an element of the structure does not have a + // function type is not a function pointer + if (f->getFunctionType() == nullptr) { + return false; + } + } + return true; +} +} // namespace clang diff --git a/clang/lib/AST/RecordLayoutBuilder.cpp b/clang/lib/AST/RecordLayoutBuilder.cpp index 99b7cbd02240ac..8e1e8417841e06 100644 --- a/clang/lib/AST/RecordLayoutBuilder.cpp +++ b/clang/lib/AST/RecordLayoutBuilder.cpp @@ -6,7 +6,6 @@ // //===----------------------------------------------------------------------===// -#include "clang/AST/RecordLayout.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTDiagnostic.h" #include "clang/AST/Attr.h" @@ -15,6 +14,9 @@ #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/Expr.h" +#include "clang/AST/RandstructSeed.h" +#include "clang/AST/RecordFieldReorganizer.h" +#include "clang/AST/RecordLayout.h" #include "clang/Basic/TargetInfo.h" #include "llvm/ADT/SmallSet.h" #include "llvm/Support/Format.h" @@ 
-43,7 +45,7 @@ struct BaseSubobjectInfo { bool IsVirtual; /// Bases - Information about the base subobjects. - SmallVector Bases; + SmallVector Bases; /// PrimaryVirtualBaseInfo - Holds the base info for the primary virtual base /// of this base info (if one exists). @@ -77,8 +79,7 @@ struct ExternalLayout { /// Get the offset of the given field. The external source must provide /// entries for all fields in the record. uint64_t getExternalFieldOffset(const FieldDecl *FD) { - assert(FieldOffsets.count(FD) && - "Field does not have an external offset"); + assert(FieldOffsets.count(FD) && "Field does not have an external offset"); return FieldOffsets[FD]; } @@ -127,8 +128,7 @@ class EmptySubobjectMap { CharUnits Offset, bool PlacingEmptyBase); void UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD, - const CXXRecordDecl *Class, - CharUnits Offset); + const CXXRecordDecl *Class, CharUnits Offset); void UpdateEmptyFieldSubobjects(const FieldDecl *FD, CharUnits Offset); /// AnyEmptySubobjectsBeyondOffset - Returns whether there are any empty @@ -137,8 +137,8 @@ class EmptySubobjectMap { return Offset <= MaxEmptyClassOffset; } - CharUnits - getFieldOffset(const ASTRecordLayout &Layout, unsigned FieldNo) const { + CharUnits getFieldOffset(const ASTRecordLayout &Layout, + unsigned FieldNo) const { uint64_t FieldOffset = Layout.getFieldOffset(FieldNo); assert(FieldOffset % CharWidth == 0 && "Field offset not at char boundary!"); @@ -166,16 +166,15 @@ class EmptySubobjectMap { CharUnits SizeOfLargestEmptySubobject; EmptySubobjectMap(const ASTContext &Context, const CXXRecordDecl *Class) - : Context(Context), CharWidth(Context.getCharWidth()), Class(Class) { - ComputeEmptySubobjectSizes(); + : Context(Context), CharWidth(Context.getCharWidth()), Class(Class) { + ComputeEmptySubobjectSizes(); } /// CanPlaceBaseAtOffset - Return whether the given base class can be placed /// at the given offset. 
/// Returns false if placing the record will result in two components /// (direct or indirect) of the same type having the same offset. - bool CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info, - CharUnits Offset); + bool CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info, CharUnits Offset); /// CanPlaceFieldAtOffset - Return whether a field can be placed at the given /// offset. @@ -226,9 +225,8 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() { } } -bool -EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD, - CharUnits Offset) const { +bool EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD, + CharUnits Offset) const { // We only need to check empty bases. if (!RD->isEmpty()) return true; @@ -264,9 +262,8 @@ void EmptySubobjectMap::AddSubobjectAtOffset(const CXXRecordDecl *RD, MaxEmptyClassOffset = Offset; } -bool -EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info, - CharUnits Offset) { +bool EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset( + const BaseSubobjectInfo *Info, CharUnits Offset) { // We don't have to keep looking past the maximum offset that's known to // contain an empty class. if (!AnyEmptySubobjectsBeyondOffset(Offset)) @@ -299,7 +296,8 @@ EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info, // Traverse all member variables. unsigned FieldNo = 0; for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(), - E = Info->Class->field_end(); I != E; ++I, ++FieldNo) { + E = Info->Class->field_end(); + I != E; ++I, ++FieldNo) { if (I->isBitField()) continue; @@ -346,7 +344,8 @@ void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info, // Traverse all member variables. 
unsigned FieldNo = 0; for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(), - E = Info->Class->field_end(); I != E; ++I, ++FieldNo) { + E = Info->Class->field_end(); + I != E; ++I, ++FieldNo) { if (I->isBitField()) continue; @@ -371,10 +370,9 @@ bool EmptySubobjectMap::CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info, return true; } -bool -EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD, - const CXXRecordDecl *Class, - CharUnits Offset) const { +bool EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset( + const CXXRecordDecl *RD, const CXXRecordDecl *Class, + CharUnits Offset) const { // We don't have to keep looking past the maximum offset that's known to // contain an empty class. if (!AnyEmptySubobjectsBeyondOffset(Offset)) @@ -424,9 +422,8 @@ EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD, return true; } -bool -EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD, - CharUnits Offset) const { +bool EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD, + CharUnits Offset) const { // We don't have to keep looking past the maximum offset that's known to // contain an empty class. 
if (!AnyEmptySubobjectsBeyondOffset(Offset)) @@ -464,9 +461,8 @@ EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD, return true; } -bool -EmptySubobjectMap::CanPlaceFieldAtOffset(const FieldDecl *FD, - CharUnits Offset) { +bool EmptySubobjectMap::CanPlaceFieldAtOffset(const FieldDecl *FD, + CharUnits Offset) { if (!CanPlaceFieldSubobjectAtOffset(FD, Offset)) return false; @@ -561,7 +557,7 @@ void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD, } } -typedef llvm::SmallPtrSet ClassSetTy; +typedef llvm::SmallPtrSet ClassSetTy; class ItaniumRecordLayoutBuilder { protected: @@ -664,15 +660,14 @@ class ItaniumRecordLayoutBuilder { EmptySubobjectMap *EmptySubobjects) : Context(Context), EmptySubobjects(EmptySubobjects), Size(0), Alignment(CharUnits::One()), UnpackedAlignment(CharUnits::One()), - UnadjustedAlignment(CharUnits::One()), - UseExternalLayout(false), InferAlignment(false), Packed(false), - IsUnion(false), IsMac68kAlign(false), IsMsStruct(false), - UnfilledBitsInLastUnit(0), LastBitfieldTypeSize(0), - MaxFieldAlignment(CharUnits::Zero()), DataSize(0), - NonVirtualSize(CharUnits::Zero()), + UnadjustedAlignment(CharUnits::One()), UseExternalLayout(false), + InferAlignment(false), Packed(false), IsUnion(false), + IsMac68kAlign(false), IsMsStruct(false), UnfilledBitsInLastUnit(0), + LastBitfieldTypeSize(0), MaxFieldAlignment(CharUnits::Zero()), + DataSize(0), NonVirtualSize(CharUnits::Zero()), NonVirtualAlignment(CharUnits::One()), PrimaryBase(nullptr), - PrimaryBaseIsVirtual(false), HasOwnVFPtr(false), - HasPackedField(false), FirstNearlyEmptyVBase(nullptr) {} + PrimaryBaseIsVirtual(false), HasOwnVFPtr(false), HasPackedField(false), + FirstNearlyEmptyVBase(nullptr) {} void Layout(const RecordDecl *D); void Layout(const CXXRecordDecl *D); @@ -684,15 +679,13 @@ class ItaniumRecordLayoutBuilder { bool FieldPacked, const FieldDecl *D); void LayoutBitField(const FieldDecl *D); - TargetCXXABI getCXXABI() const { - return 
Context.getTargetInfo().getCXXABI(); - } + TargetCXXABI getCXXABI() const { return Context.getTargetInfo().getCXXABI(); } /// BaseSubobjectInfoAllocator - Allocator for BaseSubobjectInfo objects. llvm::SpecificBumpPtrAllocator BaseSubobjectInfoAllocator; typedef llvm::DenseMap - BaseSubobjectInfoMapTy; + BaseSubobjectInfoMapTy; /// VirtualBaseInfo - Map from all the (direct or indirect) virtual bases /// of the class we're laying out to their base subobject info. @@ -761,8 +754,8 @@ class ItaniumRecordLayoutBuilder { uint64_t ComputedOffset); void CheckFieldPadding(uint64_t Offset, uint64_t UnpaddedOffset, - uint64_t UnpackedOffset, unsigned UnpackedAlign, - bool isPacked, const FieldDecl *D); + uint64_t UnpackedOffset, unsigned UnpackedAlign, + bool isPacked, const FieldDecl *D); DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); @@ -933,8 +926,7 @@ BaseSubobjectInfo *ItaniumRecordLayoutBuilder::ComputeBaseSubobjectInfo( // Traversing the bases must have created the base info for our primary // virtual base. PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase); - assert(PrimaryVirtualBaseInfo && - "Did not create a primary virtual base!"); + assert(PrimaryVirtualBaseInfo && "Did not create a primary virtual base!"); // Claim the primary virtual base as our primary virtual base. Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo; @@ -952,13 +944,12 @@ void ItaniumRecordLayoutBuilder::ComputeBaseSubobjectInfo( const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl(); // Compute the base subobject info for this base. - BaseSubobjectInfo *Info = ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, - nullptr); + BaseSubobjectInfo *Info = + ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, nullptr); if (IsVirtual) { // ComputeBaseInfo has already added this base for us. 
- assert(VirtualBaseInfo.count(BaseDecl) && - "Did not add virtual base!"); + assert(VirtualBaseInfo.count(BaseDecl) && "Did not add virtual base!"); } else { // Add the base info to the map of non-virtual bases. assert(!NonVirtualBaseInfo.count(BaseDecl) && @@ -1012,21 +1003,21 @@ void ItaniumRecordLayoutBuilder::LayoutNonVirtualBases( LayoutVirtualBase(PrimaryBaseInfo); } else { BaseSubobjectInfo *PrimaryBaseInfo = - NonVirtualBaseInfo.lookup(PrimaryBase); + NonVirtualBaseInfo.lookup(PrimaryBase); assert(PrimaryBaseInfo && "Did not find base info for non-virtual primary base!"); LayoutNonVirtualBase(PrimaryBaseInfo); } - // If this class needs a vtable/vf-table and didn't get one from a - // primary base, add it in now. + // If this class needs a vtable/vf-table and didn't get one from a + // primary base, add it in now. } else if (RD->isDynamicClass()) { assert(DataSize == 0 && "Vtable pointer must be at offset zero!"); CharUnits PtrWidth = - Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0)); + Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0)); CharUnits PtrAlign = - Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0)); + Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0)); EnsureVTablePointerAlignment(PtrAlign); HasOwnVFPtr = true; setSize(getSize() + PtrWidth); @@ -1156,8 +1147,8 @@ void ItaniumRecordLayoutBuilder::LayoutVirtualBase( // Add its base class offset. 
assert(!VBases.count(Base->Class) && "vbase offset already exists!"); - VBases.insert(std::make_pair(Base->Class, - ASTRecordLayout::VBaseInfo(Offset, false))); + VBases.insert( + std::make_pair(Base->Class, ASTRecordLayout::VBaseInfo(Offset, false))); AddPrimaryVirtualBaseOffsets(Base, Offset); } @@ -1166,7 +1157,6 @@ CharUnits ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) { const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base->Class); - CharUnits Offset; // Query the external layout to see if it provides an offset. @@ -1362,18 +1352,19 @@ void ItaniumRecordLayoutBuilder::LayoutFields(const RecordDecl *D) { // the future, this will need to be tweakable by targets. bool InsertExtraPadding = D->mayInsertExtraPadding(/*EmitRemark=*/true); bool HasFlexibleArrayMember = D->hasFlexibleArrayMember(); + for (auto I = D->field_begin(), End = D->field_end(); I != End; ++I) { auto Next(I); ++Next; + LayoutField(*I, InsertExtraPadding && (Next != End || !HasFlexibleArrayMember)); } } // Rounds the specified size to have it a multiple of the char size. -static uint64_t -roundUpSizeToCharAlignment(uint64_t Size, - const ASTContext &Context) { +static uint64_t roundUpSizeToCharAlignment(uint64_t Size, + const ASTContext &Context) { uint64_t CharAlignment = Context.getTargetInfo().getCharAlign(); return llvm::alignTo(Size, CharAlignment); } @@ -1390,9 +1381,8 @@ void ItaniumRecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize, // sizeof(T')*8 <= n. 
QualType IntegralPODTypes[] = { - Context.UnsignedCharTy, Context.UnsignedShortTy, Context.UnsignedIntTy, - Context.UnsignedLongTy, Context.UnsignedLongLongTy - }; + Context.UnsignedCharTy, Context.UnsignedShortTy, Context.UnsignedIntTy, + Context.UnsignedLongTy, Context.UnsignedLongLongTy}; QualType Type; for (const QualType &QT : IntegralPODTypes) { @@ -1415,8 +1405,7 @@ void ItaniumRecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize, uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit; if (IsUnion) { - uint64_t RoundedFieldSize = roundUpSizeToCharAlignment(FieldSize, - Context); + uint64_t RoundedFieldSize = roundUpSizeToCharAlignment(FieldSize, Context); setDataSize(std::max(getDataSizeInBits(), RoundedFieldSize)); FieldOffset = 0; } else { @@ -1536,7 +1525,7 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) { // Compute the next available bit offset. uint64_t FieldOffset = - IsUnion ? 0 : (getDataSizeInBits() - UnfilledBitsInLastUnit); + IsUnion ? 0 : (getDataSizeInBits() - UnfilledBitsInLastUnit); // Handle targets that don't honor bitfield type alignment. if (!IsMsStruct && !Context.getTargetInfo().useBitFieldTypeAlignment()) { @@ -1546,10 +1535,10 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) { // The alignment to round up to is the max of the field's natural // alignment and a target-specific fixed value (sometimes zero). unsigned ZeroLengthBitfieldBoundary = - Context.getTargetInfo().getZeroLengthBitfieldBoundary(); + Context.getTargetInfo().getZeroLengthBitfieldBoundary(); FieldAlign = std::max(FieldAlign, ZeroLengthBitfieldBoundary); - // If that doesn't apply, just ignore the field alignment. + // If that doesn't apply, just ignore the field alignment. } else { FieldAlign = 1; } @@ -1614,7 +1603,7 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) { // Compute the real offset. 
if (FieldSize == 0 || (AllowPadding && - (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize)) { + (FieldOffset & (FieldAlign - 1)) + FieldSize > TypeSize)) { FieldOffset = llvm::alignTo(FieldOffset, FieldAlign); } else if (ExplicitFieldAlign && (MaxFieldAlignmentInBits == 0 || @@ -1628,7 +1617,8 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) { // Repeat the computation for diagnostic purposes. if (FieldSize == 0 || (AllowPadding && - (UnpackedFieldOffset & (UnpackedFieldAlign-1)) + FieldSize > TypeSize)) + (UnpackedFieldOffset & (UnpackedFieldAlign - 1)) + FieldSize > + TypeSize)) UnpackedFieldOffset = llvm::alignTo(UnpackedFieldOffset, UnpackedFieldAlign); else if (ExplicitFieldAlign && @@ -1670,17 +1660,17 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) { uint64_t RoundedFieldSize; if (IsMsStruct) { RoundedFieldSize = - (FieldSize ? TypeSize : Context.getTargetInfo().getCharWidth()); + (FieldSize ? TypeSize : Context.getTargetInfo().getCharWidth()); - // Otherwise, allocate just the number of bytes required to store - // the bitfield. + // Otherwise, allocate just the number of bytes required to store + // the bitfield. } else { RoundedFieldSize = roundUpSizeToCharAlignment(FieldSize, Context); } setDataSize(std::max(getDataSizeInBits(), RoundedFieldSize)); - // For non-zero-width bitfields in ms_struct structs, allocate a new - // storage unit if necessary. + // For non-zero-width bitfields in ms_struct structs, allocate a new + // storage unit if necessary. } else if (IsMsStruct && FieldSize) { // We should have cleared UnfilledBitsInLastUnit in every case // where we changed storage units. @@ -1691,9 +1681,9 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) { UnfilledBitsInLastUnit -= FieldSize; LastBitfieldTypeSize = TypeSize; - // Otherwise, bump the data size up to include the bitfield, - // including padding up to char alignment, and then remember how - // bits we didn't use. 
+ // Otherwise, bump the data size up to include the bitfield, + // including padding up to char alignment, and then remember how + // bits we didn't use. } else { uint64_t NewSizeInBits = FieldOffset + FieldSize; uint64_t CharAlignment = Context.getTargetInfo().getCharAlign(); @@ -1730,8 +1720,7 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, LastBitfieldTypeSize = 0; bool FieldPacked = Packed || D->hasAttr(); - CharUnits FieldOffset = - IsUnion ? CharUnits::Zero() : getDataSize(); + CharUnits FieldOffset = IsUnion ? CharUnits::Zero() : getDataSize(); CharUnits FieldSize; CharUnits FieldAlign; @@ -1741,17 +1730,17 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, // Flexible array members don't have any size, but they // have to be aligned appropriately for their element type. FieldSize = CharUnits::Zero(); - const ArrayType* ATy = Context.getAsArrayType(D->getType()); + const ArrayType *ATy = Context.getAsArrayType(D->getType()); FieldAlign = Context.getTypeAlignInChars(ATy->getElementType()); } else if (const ReferenceType *RT = D->getType()->getAs()) { unsigned AS = Context.getTargetAddressSpace(RT->getPointeeType()); - FieldSize = - Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(AS)); - FieldAlign = - Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(AS)); + FieldSize = Context.toCharUnitsFromBits( + Context.getTargetInfo().getPointerWidth(AS)); + FieldAlign = Context.toCharUnitsFromBits( + Context.getTargetInfo().getPointerAlign(AS)); } else { std::pair FieldInfo = - Context.getTypeInfoInChars(D->getType()); + Context.getTypeInfoInChars(D->getType()); FieldSize = FieldInfo.first; FieldAlign = FieldInfo.second; @@ -1805,7 +1794,7 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, if (FieldPacked) FieldAlign = CharUnits::One(); CharUnits MaxAlignmentInChars = - Context.toCharUnitsFromBits(D->getMaxAlignment()); + Context.toCharUnitsFromBits(D->getMaxAlignment()); 
FieldAlign = std::max(FieldAlign, MaxAlignmentInChars); UnpackedFieldAlign = std::max(UnpackedFieldAlign, MaxAlignmentInChars); @@ -1821,7 +1810,7 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, if (UseExternalLayout) { FieldOffset = Context.toCharUnitsFromBits( - updateExternalFieldOffset(D, Context.toBits(FieldOffset))); + updateExternalFieldOffset(D, Context.toBits(FieldOffset))); if (!IsUnion && EmptySubobjects) { // Record the fact that we're placing a field at this offset. @@ -1880,8 +1869,7 @@ void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) { // array of zero-length, remains of Size 0 if (RD->isEmpty()) setSize(CharUnits::One()); - } - else + } else setSize(CharUnits::One()); } @@ -1919,8 +1907,7 @@ void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) { InBits = false; } Diag(RD->getLocation(), diag::warn_padded_struct_size) - << Context.getTypeDeclType(RD) - << PadSize + << Context.getTypeDeclType(RD) << PadSize << (InBits ? 1 : 0); // (byte|bit) } @@ -1977,10 +1964,14 @@ ItaniumRecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field, /// \returns diagnostic %select index. static unsigned getPaddingDiagFromTagKind(TagTypeKind Tag) { switch (Tag) { - case TTK_Struct: return 0; - case TTK_Interface: return 1; - case TTK_Class: return 2; - default: llvm_unreachable("Invalid tag kind for field padding diagnostic!"); + case TTK_Struct: + return 0; + case TTK_Interface: + return 1; + case TTK_Class: + return 2; + default: + llvm_unreachable("Invalid tag kind for field padding diagnostic!"); } } @@ -2010,20 +2001,18 @@ void ItaniumRecordLayoutBuilder::CheckFieldPadding( if (D->getIdentifier()) Diag(D->getLocation(), diag::warn_padded_struct_field) << getPaddingDiagFromTagKind(D->getParent()->getTagKind()) - << Context.getTypeDeclType(D->getParent()) - << PadSize + << Context.getTypeDeclType(D->getParent()) << PadSize << (InBits ? 
1 : 0) // (byte|bit) << D->getIdentifier(); else Diag(D->getLocation(), diag::warn_padded_struct_anon_field) << getPaddingDiagFromTagKind(D->getParent()->getTagKind()) - << Context.getTypeDeclType(D->getParent()) - << PadSize + << Context.getTypeDeclType(D->getParent()) << PadSize << (InBits ? 1 : 0); // (byte|bit) - } - if (isPacked && Offset != UnpackedOffset) { - HasPackedField = true; - } + } + if (isPacked && Offset != UnpackedOffset) { + HasPackedField = true; + } } static const CXXMethodDecl *computeKeyFunction(ASTContext &Context, @@ -2047,7 +2036,7 @@ static const CXXMethodDecl *computeKeyFunction(ASTContext &Context, return nullptr; bool allowInlineFunctions = - Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline(); + Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline(); for (const CXXMethodDecl *MD : RD->methods()) { if (!MD->isVirtual()) @@ -2255,9 +2244,11 @@ struct MicrosoftRecordLayoutBuilder { }; typedef llvm::DenseMap BaseOffsetsMapTy; MicrosoftRecordLayoutBuilder(const ASTContext &Context) : Context(Context) {} + private: MicrosoftRecordLayoutBuilder(const MicrosoftRecordLayoutBuilder &) = delete; void operator=(const MicrosoftRecordLayoutBuilder &) = delete; + public: void layout(const RecordDecl *RD); void cxxLayout(const CXXRecordDecl *RD); @@ -2380,15 +2371,15 @@ MicrosoftRecordLayoutBuilder::getAdjustedElementInfo( // the alignment in the case of pragam pack. Note that the required alignment // doesn't actually apply to the struct alignment at this point. 
Alignment = std::max(Alignment, Info.Alignment); - RequiredAlignment = std::max(RequiredAlignment, Layout.getRequiredAlignment()); + RequiredAlignment = + std::max(RequiredAlignment, Layout.getRequiredAlignment()); Info.Alignment = std::max(Info.Alignment, Layout.getRequiredAlignment()); Info.Size = Layout.getNonVirtualSize(); return Info; } MicrosoftRecordLayoutBuilder::ElementInfo -MicrosoftRecordLayoutBuilder::getAdjustedElementInfo( - const FieldDecl *FD) { +MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(const FieldDecl *FD) { // Get the alignment of the field type's natural alignment, ignore any // alignment attributes. ElementInfo Info; @@ -2411,8 +2402,8 @@ MicrosoftRecordLayoutBuilder::getAdjustedElementInfo( FD->getType()->getBaseElementTypeUnsafe()->getAs()) { auto const &Layout = Context.getASTRecordLayout(RT->getDecl()); EndsWithZeroSizedObject = Layout.endsWithZeroSizedObject(); - FieldRequiredAlignment = std::max(FieldRequiredAlignment, - Layout.getRequiredAlignment()); + FieldRequiredAlignment = + std::max(FieldRequiredAlignment, Layout.getRequiredAlignment()); } // Capture required alignment as a side-effect. RequiredAlignment = std::max(RequiredAlignment, FieldRequiredAlignment); @@ -2474,10 +2465,11 @@ void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) { MaxFieldAlignment = CharUnits::Zero(); // Honor the default struct packing maximum alignment flag. if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) - MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment); + MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment); // Honor the packing attribute. The MS-ABI ignores pragma pack if its larger // than the pointer size. 
- if (const MaxFieldAlignmentAttr *MFAA = RD->getAttr()){ + if (const MaxFieldAlignmentAttr *MFAA = + RD->getAttr()) { unsigned PackedAlignment = MFAA->getAlignment(); if (PackedAlignment <= Context.getTargetInfo().getPointerWidth(0)) MaxFieldAlignment = Context.toCharUnitsFromBits(PackedAlignment); @@ -2494,8 +2486,8 @@ void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) { External.BaseOffsets, External.VirtualBaseOffsets); } -void -MicrosoftRecordLayoutBuilder::initializeCXXLayout(const CXXRecordDecl *RD) { +void MicrosoftRecordLayoutBuilder::initializeCXXLayout( + const CXXRecordDecl *RD) { EndsWithZeroSizedObject = false; LeadsWithZeroSizedBase = false; HasOwnVFPtr = false; @@ -2513,8 +2505,8 @@ MicrosoftRecordLayoutBuilder::initializeCXXLayout(const CXXRecordDecl *RD) { PointerInfo.Alignment = std::min(PointerInfo.Alignment, MaxFieldAlignment); } -void -MicrosoftRecordLayoutBuilder::layoutNonVirtualBases(const CXXRecordDecl *RD) { +void MicrosoftRecordLayoutBuilder::layoutNonVirtualBases( + const CXXRecordDecl *RD) { // The MS-ABI lays out all bases that contain leading vfptrs before it lays // out any bases that do not contain vfptrs. We implement this as two passes // over the bases. 
This approach guarantees that the primary base is laid out @@ -2602,8 +2594,7 @@ static bool recordUsesEBO(const RecordDecl *RD) { } void MicrosoftRecordLayoutBuilder::layoutNonVirtualBase( - const CXXRecordDecl *RD, - const CXXRecordDecl *BaseDecl, + const CXXRecordDecl *RD, const CXXRecordDecl *BaseDecl, const ASTRecordLayout &BaseLayout, const ASTRecordLayout *&PreviousBaseLayout) { // Insert padding between two bases if the left first one is zero sized or @@ -2709,8 +2700,8 @@ void MicrosoftRecordLayoutBuilder::layoutBitField(const FieldDecl *FD) { } } -void -MicrosoftRecordLayoutBuilder::layoutZeroWidthBitField(const FieldDecl *FD) { +void MicrosoftRecordLayoutBuilder::layoutZeroWidthBitField( + const FieldDecl *FD) { // Zero-width bitfields are ignored unless they follow a non-zero-width // bitfield. if (!LastFieldIsNonZeroWidthBitfield) { @@ -2836,8 +2827,8 @@ void MicrosoftRecordLayoutBuilder::layoutVirtualBases(const CXXRecordDecl *RD) { assert(BaseOffset >= Size && "base offset already allocated"); - VBases.insert(std::make_pair(BaseDecl, - ASTRecordLayout::VBaseInfo(BaseOffset, HasVtordisp))); + VBases.insert(std::make_pair( + BaseDecl, ASTRecordLayout::VBaseInfo(BaseOffset, HasVtordisp))); Size = BaseOffset + BaseLayout.getNonVirtualSize(); PreviousBaseLayout = &BaseLayout; } @@ -2877,10 +2868,9 @@ void MicrosoftRecordLayoutBuilder::finalizeLayout(const RecordDecl *RD) { // Recursively walks the non-virtual bases of a class and determines if any of // them are in the bases with overridden methods set. 
-static bool -RequiresVtordisp(const llvm::SmallPtrSetImpl & - BasesWithOverriddenMethods, - const CXXRecordDecl *RD) { +static bool RequiresVtordisp(const llvm::SmallPtrSetImpl + &BasesWithOverriddenMethods, + const CXXRecordDecl *RD) { if (BasesWithOverriddenMethods.count(RD)) return true; // If any of a virtual bases non-virtual bases (recursively) requires a @@ -2969,7 +2959,7 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const { // until we *finish* parsing the definition. if (D->hasExternalLexicalStorage() && !D->getDefinition()) - getExternalSource()->CompleteType(const_cast(D)); + getExternalSource()->CompleteType(const_cast(D)); D = D->getDefinition(); assert(D && "Cannot get layout of forward declarations!"); @@ -2980,29 +2970,43 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const { // Note that we can't save a reference to the entry because this function // is recursive. const ASTRecordLayout *Entry = ASTRecordLayouts[D]; - if (Entry) return *Entry; + if (Entry) + return *Entry; const ASTRecordLayout *NewEntry = nullptr; + bool ShouldBeRandomized = + (RandstructAutoSelect && Randstruct::isTriviallyRandomizable(D)) || + D->getAttr() != nullptr; + if (ShouldBeRandomized) { + // There is no technical benefit to randomizing the fields of a union + // since they all share the same offset of zero. 
+ if (D->isUnion()) { + getDiagnostics().Report(D->getLocation(), + diag::warn_randomize_attr_union); + } else { + Randstruct randstruct(RandstructSeed); + randstruct.reorganizeFields(*this, D); + } + } + if (isMsLayout(*this)) { MicrosoftRecordLayoutBuilder Builder(*this); if (const auto *RD = dyn_cast(D)) { Builder.cxxLayout(RD); NewEntry = new (*this) ASTRecordLayout( *this, Builder.Size, Builder.Alignment, Builder.Alignment, - Builder.RequiredAlignment, - Builder.HasOwnVFPtr, Builder.HasOwnVFPtr || Builder.PrimaryBase, - Builder.VBPtrOffset, Builder.DataSize, Builder.FieldOffsets, - Builder.NonVirtualSize, Builder.Alignment, CharUnits::Zero(), - Builder.PrimaryBase, false, Builder.SharedVBPtrBase, - Builder.EndsWithZeroSizedObject, Builder.LeadsWithZeroSizedBase, - Builder.Bases, Builder.VBases); + Builder.RequiredAlignment, Builder.HasOwnVFPtr, + Builder.HasOwnVFPtr || Builder.PrimaryBase, Builder.VBPtrOffset, + Builder.DataSize, Builder.FieldOffsets, Builder.NonVirtualSize, + Builder.Alignment, CharUnits::Zero(), Builder.PrimaryBase, false, + Builder.SharedVBPtrBase, Builder.EndsWithZeroSizedObject, + Builder.LeadsWithZeroSizedBase, Builder.Bases, Builder.VBases); } else { Builder.layout(D); NewEntry = new (*this) ASTRecordLayout( *this, Builder.Size, Builder.Alignment, Builder.Alignment, - Builder.RequiredAlignment, - Builder.Size, Builder.FieldOffsets); + Builder.RequiredAlignment, Builder.Size, Builder.FieldOffsets); } } else { if (const auto *RD = dyn_cast(D)) { @@ -3022,7 +3026,8 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const { CharUnits NonVirtualSize = skipTailPadding ? 
DataSize : Builder.NonVirtualSize; NewEntry = new (*this) ASTRecordLayout( - *this, Builder.getSize(), Builder.Alignment, Builder.UnadjustedAlignment, + *this, Builder.getSize(), Builder.Alignment, + Builder.UnadjustedAlignment, /*RequiredAlignment : used by MS-ABI)*/ Builder.Alignment, Builder.HasOwnVFPtr, RD->isDynamicClass(), CharUnits::fromQuantity(-1), DataSize, Builder.FieldOffsets, @@ -3035,7 +3040,8 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const { Builder.Layout(D); NewEntry = new (*this) ASTRecordLayout( - *this, Builder.getSize(), Builder.Alignment, Builder.UnadjustedAlignment, + *this, Builder.getSize(), Builder.Alignment, + Builder.UnadjustedAlignment, /*RequiredAlignment : used by MS-ABI)*/ Builder.Alignment, Builder.getSize(), Builder.FieldOffsets); } @@ -3051,7 +3057,8 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const { return *NewEntry; } -const CXXMethodDecl *ASTContext::getCurrentKeyFunction(const CXXRecordDecl *RD) { +const CXXMethodDecl * +ASTContext::getCurrentKeyFunction(const CXXRecordDecl *RD) { if (!getTargetInfo().getCXXABI().hasKeyFunctions()) return nullptr; @@ -3069,7 +3076,7 @@ const CXXMethodDecl *ASTContext::getCurrentKeyFunction(const CXXRecordDecl *RD) // Store it back if it changed. if (Entry.isOffset() || Entry.isValid() != bool(Result)) - KeyFunctions[RD] = const_cast(Result); + KeyFunctions[RD] = const_cast(Result); return cast_or_null(Result); } @@ -3085,7 +3092,8 @@ void ASTContext::setNonKeyFunction(const CXXMethodDecl *Method) { auto I = Map.find(Method->getParent()); // If it's not cached, there's nothing to do. - if (I == Map.end()) return; + if (I == Map.end()) + return; // If it is cached, check whether it's the target method, and if so, // remove it from the cache. Note, the call to 'get' might invalidate @@ -3141,8 +3149,8 @@ uint64_t ASTContext::lookupFieldBitOffset(const ObjCInterfaceDecl *OID, // directly. 
unsigned Index = 0; - for (const ObjCIvarDecl *IVD = Container->all_declared_ivar_begin(); - IVD; IVD = IVD->getNextIvar()) { + for (const ObjCIvarDecl *IVD = Container->all_declared_ivar_begin(); IVD; + IVD = IVD->getNextIvar()) { if (Ivar == IVD) break; ++Index; @@ -3162,13 +3170,13 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D, const ObjCImplementationDecl *Impl) const { // Retrieve the definition if (D->hasExternalLexicalStorage() && !D->getDefinition()) - getExternalSource()->CompleteType(const_cast(D)); + getExternalSource()->CompleteType(const_cast(D)); D = D->getDefinition(); assert(D && D->isThisDeclarationADefinition() && "Invalid interface decl!"); // Look up this layout, if already laid out, return what we have. const ObjCContainerDecl *Key = - Impl ? (const ObjCContainerDecl*) Impl : (const ObjCContainerDecl*) D; + Impl ? (const ObjCContainerDecl *)Impl : (const ObjCContainerDecl *)D; if (const ASTRecordLayout *Entry = ObjCLayouts[Key]) return *Entry; @@ -3186,22 +3194,18 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D, ItaniumRecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/nullptr); Builder.Layout(D); - const ASTRecordLayout *NewEntry = - new (*this) ASTRecordLayout(*this, Builder.getSize(), - Builder.Alignment, - Builder.UnadjustedAlignment, - /*RequiredAlignment : used by MS-ABI)*/ - Builder.Alignment, - Builder.getDataSize(), - Builder.FieldOffsets); + const ASTRecordLayout *NewEntry = new (*this) ASTRecordLayout( + *this, Builder.getSize(), Builder.Alignment, Builder.UnadjustedAlignment, + /*RequiredAlignment : used by MS-ABI)*/ + Builder.Alignment, Builder.getDataSize(), Builder.FieldOffsets); ObjCLayouts[Key] = NewEntry; return *NewEntry; } -static void PrintOffset(raw_ostream &OS, - CharUnits Offset, unsigned IndentLevel) { +static void PrintOffset(raw_ostream &OS, CharUnits Offset, + unsigned IndentLevel) { OS << llvm::format("%10" PRId64 " | ", (int64_t)Offset.getQuantity()); OS.indent(IndentLevel * 2); } @@ -3230,17 
+3234,14 @@ static void PrintIndentNoOffset(raw_ostream &OS, unsigned IndentLevel) { } static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD, - const ASTContext &C, - CharUnits Offset, - unsigned IndentLevel, - const char* Description, - bool PrintSizeInfo, - bool IncludeVirtualBases) { + const ASTContext &C, CharUnits Offset, + unsigned IndentLevel, const char *Description, + bool PrintSizeInfo, bool IncludeVirtualBases) { const ASTRecordLayout &Layout = C.getASTRecordLayout(RD); auto CXXRD = dyn_cast(RD); PrintOffset(OS, Offset, IndentLevel); - OS << C.getTypeDeclType(const_cast(RD)).getAsString(); + OS << C.getTypeDeclType(const_cast(RD)).getAsString(); if (Description) OS << ' ' << Description; if (CXXRD && CXXRD->isEmpty()) @@ -3277,8 +3278,9 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD, // Sort nvbases by offset. std::stable_sort(Bases.begin(), Bases.end(), [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { - return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); - }); + return Layout.getBaseClassOffset(L) < + Layout.getBaseClassOffset(R); + }); // Dump (non-virtual) bases for (const CXXRecordDecl *Base : Bases) { @@ -3298,12 +3300,12 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD, // Dump fields. uint64_t FieldNo = 0; - for (RecordDecl::field_iterator I = RD->field_begin(), - E = RD->field_end(); I != E; ++I, ++FieldNo) { + for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end(); + I != E; ++I, ++FieldNo) { const FieldDecl &Field = **I; uint64_t LocalFieldOffsetInBits = Layout.getFieldOffset(FieldNo); CharUnits FieldOffset = - Offset + C.toCharUnitsFromBits(LocalFieldOffsetInBits); + Offset + C.toCharUnitsFromBits(LocalFieldOffsetInBits); // Recursively dump fields of record type. if (auto RT = Field.getType()->getAs()) { @@ -3328,7 +3330,7 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD, // Dump virtual bases. 
if (CXXRD && IncludeVirtualBases) { const ASTRecordLayout::VBaseOffsetsMapTy &VtorDisps = - Layout.getVBaseOffsetsMap(); + Layout.getVBaseOffsetsMap(); for (const CXXBaseSpecifier &Base : CXXRD->vbases()) { assert(Base.isVirtual() && "Found non-virtual class!"); @@ -3342,14 +3344,16 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD, } DumpRecordLayout(OS, VBase, C, VBaseOffset, IndentLevel, - VBase == Layout.getPrimaryBase() ? - "(primary virtual base)" : "(virtual base)", + VBase == Layout.getPrimaryBase() + ? "(primary virtual base)" + : "(virtual base)", /*PrintSizeInfo=*/false, /*IncludeVirtualBases=*/false); } } - if (!PrintSizeInfo) return; + if (!PrintSizeInfo) + return; PrintIndentNoOffset(OS, IndentLevel - 1); OS << "[sizeof=" << Layout.getSize().getQuantity(); @@ -3366,12 +3370,11 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD, OS << "]\n"; } -void ASTContext::DumpRecordLayout(const RecordDecl *RD, - raw_ostream &OS, +void ASTContext::DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS, bool Simple) const { if (!Simple) { ::DumpRecordLayout(OS, RD, *this, CharUnits(), 0, nullptr, - /*PrintSizeInfo*/true, + /*PrintSizeInfo*/ true, /*IncludeVirtualBases=*/true); return; } @@ -3393,7 +3396,8 @@ void ASTContext::DumpRecordLayout(const RecordDecl *RD, OS << " Alignment:" << toBits(Info.getAlignment()) << "\n"; OS << " FieldOffsets: ["; for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i) { - if (i) OS << ", "; + if (i) + OS << ", "; OS << Info.getFieldOffset(i); } OS << "]>\n"; diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index e7fb3fe403c564..7edade7c758bff 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -4405,6 +4405,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back(Args.MakeArgString(Twine(N))); } + // -randstruct-seed parent process + if (Arg *A = 
Args.getLastArg(options::OPT_randstruct_seed_EQ)) { + CmdArgs.push_back( "-randstruct-seed" ); + CmdArgs.push_back(A->getValue(0)); + } + + if (Arg *A = Args.getLastArg(options::OPT_randstruct_auto)) { + CmdArgs.push_back( "-randstruct-auto" ); + } + // -fvisibility= and -fvisibility-ms-compat are of a piece. if (const Arg *A = Args.getLastArg(options::OPT_fvisibility_EQ, options::OPT_fvisibility_ms_compat)) { diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 1a33a00004d606..312c95d8b18b9c 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "clang/AST/RandstructSeed.h" #include "clang/Frontend/CompilerInvocation.h" #include "TestModuleFileExtension.h" #include "clang/Basic/Builtins.h" @@ -1669,6 +1670,13 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Opts.ProgramAction = frontend::PluginAction; Opts.ActionName = A->getValue(); } + // child process handle arguments + if (const Arg* A = Args.getLastArg(OPT_randstruct_seed)) { + RandstructSeed = A->getValue(0); + } + if (const Arg* A = Args.getLastArg(OPT_randstruct_auto)) { + RandstructAutoSelect = true; + } Opts.AddPluginActions = Args.getAllArgValues(OPT_add_plugin); for (const auto *AA : Args.filtered(OPT_plugin_arg)) Opts.PluginArgs[AA->getValue(0)].emplace_back(AA->getValue(1)); diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp index 322de02c950e1a..d1f0513da1219f 100644 --- a/clang/lib/Sema/SemaDeclAttr.cpp +++ b/clang/lib/Sema/SemaDeclAttr.cpp @@ -6853,6 +6853,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, handleSimpleAttributeWithExclusions(S, D, AL); break; + case ParsedAttr::AT_RandomizeLayout: + handleSimpleAttribute(S, D, AL); + break; case ParsedAttr::AT_CodeSeg: handleCodeSegAttr(S, D, AL); break; diff --git 
a/clang/test/Misc/pragma-attribute-supported-attributes-list.test b/clang/test/Misc/pragma-attribute-supported-attributes-list.test index 45012ae9bfdd11..8915de50f668dc 100644 --- a/clang/test/Misc/pragma-attribute-supported-attributes-list.test +++ b/clang/test/Misc/pragma-attribute-supported-attributes-list.test @@ -117,6 +117,7 @@ // CHECK-NEXT: Overloadable (SubjectMatchRule_function) // CHECK-NEXT: ParamTypestate (SubjectMatchRule_variable_is_parameter) // CHECK-NEXT: PassObjectSize (SubjectMatchRule_variable_is_parameter) +// CHECK-NEXT: RandomizeLayout (SubjectMatchRule_record) // CHECK-NEXT: RenderScriptKernel (SubjectMatchRule_function) // CHECK-NEXT: ReqdWorkGroupSize (SubjectMatchRule_function) // CHECK-NEXT: RequireConstantInit (SubjectMatchRule_variable_is_global)