//===- UnwindInfoSection.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "UnwindInfoSection.h"
#include "Config.h"
#include "InputSection.h"
#include "MergedOutputSection.h"
#include "OutputSection.h"
#include "OutputSegment.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"

#include "lld/Common/ErrorHandler.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/BinaryFormat/MachO.h"

#include <algorithm>

using namespace llvm;
using namespace llvm::MachO;
using namespace lld;
using namespace lld::macho;

// Compact Unwind format is a Mach-O evolution of DWARF Unwind that
// optimizes space and exception-time lookup. Most DWARF unwind
// entries can be replaced with Compact Unwind entries, but the ones
// that cannot are retained in DWARF form.
//
// This comment addresses the macro-level organization of the pre-link
// and post-link compact unwind tables. For the micro-level organization
// pertaining to the bitfield layout of the 32-bit compact unwind
// entries, see libunwind/include/mach-o/compact_unwind_encoding.h
//
// Important clarifying factoids:
//
// * __LD,__compact_unwind is the compact unwind format for compiler
//   output and linker input. It is never a final output. It can appear
//   in an intermediate output produced with the `-r` option, which
//   retains relocs.
//
// * __TEXT,__unwind_info is the compact unwind format for final
//   linker output. It is never an input.
//
// * __TEXT,__eh_frame is the DWARF format for both linker input and output.
//
// * __TEXT,__unwind_info entries are divided into 4 KiB pages (2nd
//   level) by ascending address, and the pages are referenced by an
//   index (1st level) in the section header.
//
// * Following the headers in __TEXT,__unwind_info, the bulk of the
//   section contains a vector of compact unwind entries
//   `{functionOffset, encoding}` sorted by ascending `functionOffset`.
//   Adjacent entries with the same encoding can be folded to great
//   advantage, achieving a 3-order-of-magnitude reduction in the
//   number of entries (see the folding example after this comment block).
//
// * The __TEXT,__unwind_info format can accommodate up to 127 unique
//   encodings for the space-efficient compressed format. In practice,
//   fewer than a dozen unique encodings are used by C++ programs of
//   all sizes. Therefore, we don't even bother implementing the regular
//   non-compressed format. Time will tell if anyone in the field ever
//   overflows the 127-encodings limit.
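//
// Example of the folding described above (hypothetical addresses and
// encodings): a sorted run of entries sharing one encoding
//   { 0x1000, enc0 }, { 0x1080, enc0 }, { 0x1100, enc0 }, { 0x2000, enc1 }
// folds down to
//   { 0x1000, enc0 }, { 0x2000, enc1 }
// because a lookup only needs the first functionOffset at which each encoding
// starts to apply; every function below the next entry's functionOffset
// shares it.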

// TODO(gkm): prune __eh_frame entries superseded by __unwind_info
// TODO(gkm): how do we align the 2nd-level pages?

UnwindInfoSection::UnwindInfoSection()
    : SyntheticSection(segment_names::text, section_names::unwindInfo) {}

bool UnwindInfoSection::isNeeded() const {
  return (compactUnwindSection != nullptr);
}

// Scan the __LD,__compact_unwind entries and compute the space needs of
// __TEXT,__unwind_info and __TEXT,__eh_frame.

void UnwindInfoSection::finalize() {
  if (compactUnwindSection == nullptr)
    return;

  // At this point, the address space for __TEXT,__text has been
  // assigned, so we can relocate the __LD,__compact_unwind entries
  // into a temporary buffer. Relocation is necessary in order to sort
  // the CU entries by function address. Sorting is necessary so that
  // we can fold adjacent CU entries with identical
  // encoding+personality+lsda. Folding is necessary because it reduces
  // the number of CU entries by as much as 3 orders of magnitude!
  compactUnwindSection->finalize();
  assert(compactUnwindSection->getSize() % sizeof(CompactUnwindEntry64) == 0);
  size_t cuCount =
      compactUnwindSection->getSize() / sizeof(CompactUnwindEntry64);
  cuVector.resize(cuCount);
  // Relocate all __LD,__compact_unwind entries
  compactUnwindSection->writeTo(reinterpret_cast<uint8_t *>(cuVector.data()));

  // Rather than sort & fold the 32-byte entries directly, we create a
  // vector of pointers to entries and sort & fold that instead.
  cuPtrVector.reserve(cuCount);
  for (const auto &cuEntry : cuVector)
    cuPtrVector.emplace_back(&cuEntry);
  std::sort(cuPtrVector.begin(), cuPtrVector.end(),
            [](const CompactUnwindEntry64 *a, const CompactUnwindEntry64 *b) {
              return a->functionAddress < b->functionAddress;
            });

  // Fold adjacent entries with matching encoding+personality+lsda.
  // We use three iterators on the same cuPtrVector to fold in-situ:
  // (1) `foldBegin` is the first of a potential sequence of matching entries.
  // (2) `foldEnd` is the first non-matching entry after `foldBegin`.
  // The semi-open interval [ foldBegin .. foldEnd ) contains a range of
  // entries that can be folded into a single entry and written to ...
  // (3) `foldWrite`
  auto foldWrite = cuPtrVector.begin();
  for (auto foldBegin = cuPtrVector.begin(); foldBegin < cuPtrVector.end();) {
    auto foldEnd = foldBegin;
    while (++foldEnd < cuPtrVector.end() &&
           (*foldBegin)->encoding == (*foldEnd)->encoding &&
           (*foldBegin)->personality == (*foldEnd)->personality &&
           (*foldBegin)->lsda == (*foldEnd)->lsda)
      ;
    *foldWrite++ = *foldBegin;
    foldBegin = foldEnd;
  }
  cuPtrVector.erase(foldWrite, cuPtrVector.end());
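  // (In effect this is std::unique over cuPtrVector with an equality
  // predicate that compares encoding, personality & lsda; a sketch of the
  // equivalent call, which likewise keeps only the first entry of each
  // matching run:
  //   cuPtrVector.erase(
  //       std::unique(cuPtrVector.begin(), cuPtrVector.end(),
  //                   [](const CompactUnwindEntry64 *a,
  //                      const CompactUnwindEntry64 *b) {
  //                     return a->encoding == b->encoding &&
  //                            a->personality == b->personality &&
  //                            a->lsda == b->lsda;
  //                   }),
  //       cuPtrVector.end());
  // The explicit loop above does the same thing.)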

  // Count frequencies of the folded encodings
  llvm::DenseMap<compact_unwind_encoding_t, size_t> encodingFrequencies;
  for (auto cuPtrEntry : cuPtrVector)
    encodingFrequencies[cuPtrEntry->encoding]++;
  if (encodingFrequencies.size() > UNWIND_INFO_COMMON_ENCODINGS_MAX)
    error("TODO(gkm): handle common encodings table overflow");

  // Make a table of encodings, sorted by descending frequency
  for (const auto &frequency : encodingFrequencies)
    commonEncodings.emplace_back(frequency);
  std::sort(commonEncodings.begin(), commonEncodings.end(),
            [](const std::pair<compact_unwind_encoding_t, size_t> &a,
               const std::pair<compact_unwind_encoding_t, size_t> &b) {
              if (a.second == b.second)
                // When frequencies match, secondarily sort on encoding
                // to maintain parity with validate-unwind-info.py
                return a.first > b.first;
              return a.second > b.second;
            });
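  // For example (hypothetical encodings), frequencies { e1: 900, e2: 80,
  // e3: 80 } with e3 > e2 numerically yield commonEncodings =
  // [ {e1, 900}, {e3, 80}, {e2, 80} ]: descending frequency, ties broken by
  // descending encoding value.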

  // Split the folded CU entries into pages, limited by the capacity of a
  // page and the 24-bit range of the function offset.
  //
  // Record the page splits as a vector of iterators on cuPtrVector
  // such that successive elements form a semi-open interval. E.g.,
  // page X's bounds are thus: [ pageBounds[X] .. pageBounds[X+1] )
  //
  // Note that pageBounds.size() is one greater than the number of
  // pages, and pageBounds.back() holds the sentinel cuPtrVector.cend().
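  //
  // Example (hypothetical addresses): if the first entry of a page has
  // functionAddress 0x100000, the lower_bound key below is
  // 0x100000 + UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET_MASK, so the page
  // takes only entries whose 24-bit offset from that base address fits;
  // the first entry at or beyond the key (or beyond the per-page capacity)
  // starts the next page.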
  pageBounds.push_back(cuPtrVector.cbegin());
  // TODO(gkm): cut 1st page entries short to accommodate section headers ???
  CompactUnwindEntry64 cuEntryKey;
  for (size_t i = 0;;) {
    // Limit the search to entries that can fit within a 4 KiB page.
    const auto pageBegin = pageBounds[0] + i;
    const auto pageMax =
        pageBounds[0] +
        std::min(i + UNWIND_INFO_COMPRESSED_SECOND_LEVEL_ENTRIES_MAX,
                 cuPtrVector.size());
    // Exclude entries with functionOffset that would overflow 24 bits
    cuEntryKey.functionAddress = (*pageBegin)->functionAddress +
                                 UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET_MASK;
    const auto pageBreak = std::lower_bound(
        pageBegin, pageMax, &cuEntryKey,
        [](const CompactUnwindEntry64 *a, const CompactUnwindEntry64 *b) {
          return a->functionAddress < b->functionAddress;
        });
    pageBounds.push_back(pageBreak);
    if (pageBreak == cuPtrVector.cend())
      break;
    i = pageBreak - cuPtrVector.cbegin();
  }

  // Compute the size of the __TEXT,__unwind_info section.
  level2PagesOffset =
      sizeof(unwind_info_section_header) +
      commonEncodings.size() * sizeof(uint32_t) +
      personalities.size() * sizeof(uint32_t) +
      pageBounds.size() * sizeof(unwind_info_section_header_index_entry) +
      lsdaEntries.size() * sizeof(unwind_info_section_header_lsda_index_entry);
  unwindInfoSize =
      level2PagesOffset +
      (pageBounds.size() - 1) *
          sizeof(unwind_info_compressed_second_level_page_header) +
      cuPtrVector.size() * sizeof(uint32_t);
}

// All inputs are relocated and output addresses are known, so write!

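// Informal sketch of the section layout produced below, with offsets as
// computed in finalize():
//
//   unwind_info_section_header
//   uint32_t commonEncodings[]
//   uint32_t personalities[]
//   unwind_info_section_header_index_entry level1Index[]   (last is a sentinel)
//   unwind_info_section_header_lsda_index_entry lsdas[]
//   per page: unwind_info_compressed_second_level_page_header followed by
//             its uint32_t compressed entries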
void UnwindInfoSection::writeTo(uint8_t *buf) const {
  // Section header
  auto *uip = reinterpret_cast<unwind_info_section_header *>(buf);
  uip->version = 1;
  uip->commonEncodingsArraySectionOffset = sizeof(unwind_info_section_header);
  uip->commonEncodingsArrayCount = commonEncodings.size();
  uip->personalityArraySectionOffset =
      uip->commonEncodingsArraySectionOffset +
      (uip->commonEncodingsArrayCount * sizeof(uint32_t));
  uip->personalityArrayCount = personalities.size();
  uip->indexSectionOffset = uip->personalityArraySectionOffset +
                            (uip->personalityArrayCount * sizeof(uint32_t));
  uip->indexCount = pageBounds.size();

  // Common encodings
  auto *i32p = reinterpret_cast<uint32_t *>(&uip[1]);
  for (const auto &encoding : commonEncodings)
    *i32p++ = encoding.first;

  // Personalities
  for (const auto &personality : personalities)
    *i32p++ = personality;

  // Level-1 index
  uint32_t lsdaOffset =
      uip->indexSectionOffset +
      uip->indexCount * sizeof(unwind_info_section_header_index_entry);
  uint64_t l2PagesOffset = level2PagesOffset;
  auto *iep = reinterpret_cast<unwind_info_section_header_index_entry *>(i32p);
  for (size_t i = 0; i < pageBounds.size() - 1; i++) {
    iep->functionOffset = (*pageBounds[i])->functionAddress;
    iep->secondLevelPagesSectionOffset = l2PagesOffset;
    iep->lsdaIndexArraySectionOffset = lsdaOffset;
    iep++;
    // TODO(gkm): pad to 4 KiB page boundary ???
    size_t entryCount = pageBounds[i + 1] - pageBounds[i];
    uint64_t pageSize =
        sizeof(unwind_info_compressed_second_level_page_header) +
        entryCount * sizeof(uint32_t);
    l2PagesOffset += pageSize;
  }
  // Level-1 sentinel: its functionOffset marks the end of the address range
  // covered, i.e. the end of the highest-addressed function.
  const CompactUnwindEntry64 &cuEnd = *std::max_element(
      cuVector.begin(), cuVector.end(),
      [](const CompactUnwindEntry64 &a, const CompactUnwindEntry64 &b) {
        return a.functionAddress + a.functionLength <
               b.functionAddress + b.functionLength;
      });
  iep->functionOffset = cuEnd.functionAddress + cuEnd.functionLength;
  iep->secondLevelPagesSectionOffset = 0;
  iep->lsdaIndexArraySectionOffset = lsdaOffset;
  iep++;

  // LSDAs
  auto *lep =
      reinterpret_cast<unwind_info_section_header_lsda_index_entry *>(iep);
  for (const auto &lsda : lsdaEntries) {
    lep->functionOffset = lsda.functionOffset;
    lep->lsdaOffset = lsda.lsdaOffset;
    lep++;
  }

  // Create a map from encoding to common-encoding-table index. Compact
  // encoding entries use 7 bits to index the common-encoding table.
  size_t i = 0;
  llvm::DenseMap<compact_unwind_encoding_t, size_t> commonEncodingIndexes;
  for (const auto &encoding : commonEncodings)
    commonEncodingIndexes[encoding.first] = i++;

  // Level-2 pages
  auto *p2p =
      reinterpret_cast<unwind_info_compressed_second_level_page_header *>(lep);
  for (size_t i = 0; i < pageBounds.size() - 1; i++) {
    p2p->kind = UNWIND_SECOND_LEVEL_COMPRESSED;
    p2p->entryPageOffset =
        sizeof(unwind_info_compressed_second_level_page_header);
    p2p->entryCount = pageBounds[i + 1] - pageBounds[i];
    p2p->encodingsPageOffset =
        p2p->entryPageOffset + p2p->entryCount * sizeof(uint32_t);
    p2p->encodingsCount = 0;
    auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
    auto cuPtrVectorIt = pageBounds[i];
    uintptr_t functionAddressBase = (*cuPtrVectorIt)->functionAddress;
    while (cuPtrVectorIt < pageBounds[i + 1]) {
      const CompactUnwindEntry64 *cuep = *cuPtrVectorIt++;
      size_t cueIndex = commonEncodingIndexes.lookup(cuep->encoding);
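      // Pack the compressed entry: the common-encoding index sits above the
      // 24-bit field holding the function's offset from the page's base
      // function address.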
      *ep++ = ((cueIndex << UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET_BITS) |
               (cuep->functionAddress - functionAddressBase));
    }
    p2p =
        reinterpret_cast<unwind_info_compressed_second_level_page_header *>(ep);
  }
  assert(getSize() ==
         static_cast<size_t>((reinterpret_cast<uint8_t *>(p2p) - buf)));
}