File: | llvm/lib/MC/MCAssembler.cpp |
Warning: | line 582, column 15 Assigned value is garbage or undefined |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | ||||
9 | #include "llvm/MC/MCAssembler.h" | |||
10 | #include "llvm/ADT/ArrayRef.h" | |||
11 | #include "llvm/ADT/SmallString.h" | |||
12 | #include "llvm/ADT/SmallVector.h" | |||
13 | #include "llvm/ADT/Statistic.h" | |||
14 | #include "llvm/ADT/StringRef.h" | |||
15 | #include "llvm/ADT/Twine.h" | |||
16 | #include "llvm/MC/MCAsmBackend.h" | |||
17 | #include "llvm/MC/MCAsmInfo.h" | |||
18 | #include "llvm/MC/MCAsmLayout.h" | |||
19 | #include "llvm/MC/MCCodeEmitter.h" | |||
20 | #include "llvm/MC/MCCodeView.h" | |||
21 | #include "llvm/MC/MCContext.h" | |||
22 | #include "llvm/MC/MCDwarf.h" | |||
23 | #include "llvm/MC/MCExpr.h" | |||
24 | #include "llvm/MC/MCFixup.h" | |||
25 | #include "llvm/MC/MCFixupKindInfo.h" | |||
26 | #include "llvm/MC/MCFragment.h" | |||
27 | #include "llvm/MC/MCInst.h" | |||
28 | #include "llvm/MC/MCObjectWriter.h" | |||
29 | #include "llvm/MC/MCSection.h" | |||
30 | #include "llvm/MC/MCSectionELF.h" | |||
31 | #include "llvm/MC/MCSymbol.h" | |||
32 | #include "llvm/MC/MCValue.h" | |||
33 | #include "llvm/Support/Alignment.h" | |||
34 | #include "llvm/Support/Casting.h" | |||
35 | #include "llvm/Support/Debug.h" | |||
36 | #include "llvm/Support/ErrorHandling.h" | |||
37 | #include "llvm/Support/LEB128.h" | |||
38 | #include "llvm/Support/MathExtras.h" | |||
39 | #include "llvm/Support/raw_ostream.h" | |||
40 | #include <cassert> | |||
41 | #include <cstdint> | |||
42 | #include <cstring> | |||
43 | #include <tuple> | |||
44 | #include <utility> | |||
45 | ||||
46 | using namespace llvm; | |||
47 | ||||
48 | #define DEBUG_TYPE "assembler"
49 | ||||
namespace {
namespace stats {

// Assembler statistics (reported with -stats).  All counters live in the
// "assembler" debug group; they are bumped from the emission, layout and
// relaxation paths below.
STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedCompactEncodedInstFragments,
          "Number of emitted assembler fragments - compact encoded inst");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedOrgFragments,
          "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(FragmentLayouts, "Number of fragment layouts");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

} // end namespace stats
} // end anonymous namespace
74 | ||||
75 | // FIXME FIXME FIXME: There are number of places in this file where we convert | |||
76 | // what is a 64-bit assembler value used for computation into a value in the | |||
77 | // object file, which may truncate it. We should detect that truncation where | |||
78 | // invalid and report errors back. | |||
79 | ||||
80 | /* *** */ | |||
81 | ||||
/// Construct an assembler that drives the given target backend, code
/// emitter and object writer; ownership of all three is transferred to
/// the assembler.  All scalar state starts at its "unset" default.
MCAssembler::MCAssembler(MCContext &Context,
                         std::unique_ptr<MCAsmBackend> Backend,
                         std::unique_ptr<MCCodeEmitter> Emitter,
                         std::unique_ptr<MCObjectWriter> Writer)
    : Context(Context), Backend(std::move(Backend)),
      Emitter(std::move(Emitter)), Writer(std::move(Writer)),
      BundleAlignSize(0), RelaxAll(false), SubsectionsViaSymbols(false),
      IncrementalLinkerCompatible(false), ELFHeaderEFlags(0) {
  VersionInfo.Major = 0; // Major version == 0 for "none specified"
}
92 | ||||
// Out-of-line destructor; presumably anchored here so the unique_ptr
// members can hold types that are incomplete in the header — TODO confirm.
MCAssembler::~MCAssembler() = default;
94 | ||||
95 | void MCAssembler::reset() { | |||
96 | Sections.clear(); | |||
97 | Symbols.clear(); | |||
98 | IndirectSymbols.clear(); | |||
99 | DataRegions.clear(); | |||
100 | LinkerOptions.clear(); | |||
101 | FileNames.clear(); | |||
102 | ThumbFuncs.clear(); | |||
103 | BundleAlignSize = 0; | |||
104 | RelaxAll = false; | |||
105 | SubsectionsViaSymbols = false; | |||
106 | IncrementalLinkerCompatible = false; | |||
107 | ELFHeaderEFlags = 0; | |||
108 | LOHContainer.reset(); | |||
109 | VersionInfo.Major = 0; | |||
110 | VersionInfo.SDKVersion = VersionTuple(); | |||
111 | ||||
112 | // reset objects owned by us | |||
113 | if (getBackendPtr()) | |||
114 | getBackendPtr()->reset(); | |||
115 | if (getEmitterPtr()) | |||
116 | getEmitterPtr()->reset(); | |||
117 | if (getWriterPtr()) | |||
118 | getWriterPtr()->reset(); | |||
119 | getLOHContainer().reset(); | |||
120 | } | |||
121 | ||||
122 | bool MCAssembler::registerSection(MCSection &Section) { | |||
123 | if (Section.isRegistered()) | |||
124 | return false; | |||
125 | Sections.push_back(&Section); | |||
126 | Section.setIsRegistered(true); | |||
127 | return true; | |||
128 | } | |||
129 | ||||
130 | bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const { | |||
131 | if (ThumbFuncs.count(Symbol)) | |||
132 | return true; | |||
133 | ||||
134 | if (!Symbol->isVariable()) | |||
135 | return false; | |||
136 | ||||
137 | const MCExpr *Expr = Symbol->getVariableValue(); | |||
138 | ||||
139 | MCValue V; | |||
140 | if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr)) | |||
141 | return false; | |||
142 | ||||
143 | if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None) | |||
144 | return false; | |||
145 | ||||
146 | const MCSymbolRefExpr *Ref = V.getSymA(); | |||
147 | if (!Ref) | |||
148 | return false; | |||
149 | ||||
150 | if (Ref->getKind() != MCSymbolRefExpr::VK_None) | |||
151 | return false; | |||
152 | ||||
153 | const MCSymbol &Sym = Ref->getSymbol(); | |||
154 | if (!isThumbFunc(&Sym)) | |||
155 | return false; | |||
156 | ||||
157 | ThumbFuncs.insert(Symbol); // Cache it. | |||
158 | return true; | |||
159 | } | |||
160 | ||||
161 | bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const { | |||
162 | // Non-temporary labels should always be visible to the linker. | |||
163 | if (!Symbol.isTemporary()) | |||
164 | return true; | |||
165 | ||||
166 | if (Symbol.isUsedInReloc()) | |||
167 | return true; | |||
168 | ||||
169 | return false; | |||
170 | } | |||
171 | ||||
/// Find the atom (linker-visible defining symbol) for symbol \p S.
/// \returns \p S itself if it is linker visible, the containing
/// fragment's atom otherwise, or nullptr when no atom defines it.
const MCSymbol *MCAssembler::getAtom(const MCSymbol &S) const {
  // Linker visible symbols define atoms.
  if (isSymbolLinkerVisible(S))
    return &S;

  // Absolute and undefined symbols have no defining atom.
  if (!S.isInSection())
    return nullptr;

  // Non-linker visible symbols in sections which can't be atomized have no
  // defining atom.
  if (!getContext().getAsmInfo()->isSectionAtomizableBySymbols(
          *S.getFragment()->getParent()))
    return nullptr;

  // Otherwise, return the atom for the containing fragment.
  return S.getFragment()->getAtom();
}
190 | ||||
/// Try to fully evaluate \p Fixup against the current \p Layout.
///
/// \param Layout    The current, possibly partial, layout.
/// \param Fixup     The fixup to evaluate.
/// \param DF        The fragment containing the fixup.
/// \param Target    [out] Relocatable evaluation of the fixup expression.
/// \param Value     [out] The resolved numeric value (0 on error).
/// \param WasForced [out] True when the value was resolvable but the
///                  backend forced a relocation anyway.
/// \returns true if the fixup is fully resolved; also true on evaluation
/// errors, to suppress any further processing.
bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout,
                                const MCFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, uint64_t &Value,
                                bool &WasForced) const {
  ++stats::evaluateFixup;

  // FIXME: This code has some duplication with recordRelocation. We should
  // probably merge the two into a single callback that tries to evaluate a
  // fixup and records a relocation if one is needed.

  // On error claim to have completely evaluated the fixup, to prevent any
  // further processing from being done.
  const MCExpr *Expr = Fixup.getValue();
  MCContext &Ctx = getContext();
  Value = 0;
  WasForced = false;
  if (!Expr->evaluateAsRelocatable(Target, &Layout, &Fixup)) {
    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
    return true;
  }
  // A subtracted symbol (SymB) must not carry a variant modifier.
  if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
    if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
      Ctx.reportError(Fixup.getLoc(),
                      "unsupported subtraction of qualified symbol");
      return true;
    }
  }

  assert(getBackendPtr() && "Expected assembler backend");
  bool IsPCRel = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                 MCFixupKindInfo::FKF_IsPCRel;

  // A PC-relative fixup resolves only when it references a single,
  // unqualified, defined symbol that the writer reports as fully
  // resolved against this fragment; an absolute fixup resolves when the
  // whole expression is absolute.
  bool IsResolved = false;
  if (IsPCRel) {
    if (Target.getSymB()) {
      IsResolved = false;
    } else if (!Target.getSymA()) {
      IsResolved = false;
    } else {
      const MCSymbolRefExpr *A = Target.getSymA();
      const MCSymbol &SA = A->getSymbol();
      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
        IsResolved = false;
      } else if (auto *Writer = getWriterPtr()) {
        IsResolved = Writer->isSymbolRefDifferenceFullyResolvedImpl(
            *this, SA, *DF, false, true);
      }
    }
  } else {
    IsResolved = Target.isAbsolute();
  }

  // Start from the constant addend, then fold in the layout offsets of any
  // defined symbols (SymA added, SymB subtracted).
  Value = Target.getConstant();

  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    const MCSymbol &Sym = A->getSymbol();
    if (Sym.isDefined())
      Value += Layout.getSymbolOffset(Sym);
  }
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    const MCSymbol &Sym = B->getSymbol();
    if (Sym.isDefined())
      Value -= Layout.getSymbolOffset(Sym);
  }

  bool ShouldAlignPC = getBackend().getFixupKindInfo(Fixup.getKind()).Flags &
                       MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
  assert((ShouldAlignPC ? IsPCRel : true) &&
         "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");

  if (IsPCRel) {
    uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset();

    // A number of ARM fixups in Thumb mode require that the effective PC
    // address be determined as the 32-bit aligned version of the actual offset.
    if (ShouldAlignPC) Offset &= ~0x3;
    Value -= Offset;
  }

  // Let the backend force a relocation if needed.
  if (IsResolved && getBackend().shouldForceRelocation(*this, Fixup, Target)) {
    IsResolved = false;
    WasForced = true;
  }

  return IsResolved;
}
278 | ||||
/// Compute the effective size in bytes of fragment \p F given the current
/// \p Layout.  Reports errors through the context and returns 0 when a
/// fragment's size expression cannot be evaluated or is invalid.
uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
                                          const MCFragment &F) const {
  assert(getBackendPtr() && "Requires assembler backend");
  switch (F.getKind()) {
  // Encoded fragments are exactly as large as their byte contents.
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCCompactEncodedInstFragment>(F).getContents().size();
  case MCFragment::FT_Fill: {
    auto &FF = cast<MCFillFragment>(F);
    // The repeat count must be known at assembly time.
    int64_t NumValues = 0;
    if (!FF.getNumValues().evaluateAsAbsolute(NumValues, Layout)) {
      getContext().reportError(FF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }
    int64_t Size = NumValues * FF.getValueSize();
    if (Size < 0) {
      getContext().reportError(FF.getLoc(), "invalid number of bytes");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_BoundaryAlign:
    return cast<MCBoundaryAlignFragment>(F).getSize();

  case MCFragment::FT_SymbolId:
    // Symbol-id fragments are a fixed 4 bytes.
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = Layout.getFragmentOffset(&AF);
    unsigned Size = offsetToAlignment(Offset, Align(AF.getAlignment()));

    // Insert extra Nops for code alignment if the target define
    // shouldInsertExtraNopBytesForCodeAlign target hook.
    if (AF.getParent()->UseCodeAlign() && AF.hasEmitNops() &&
        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
      return Size;

    // If we are padding with nops, force the padding to be larger than the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment();
    }
    // Emitting more than the caller's byte limit means emitting nothing.
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    // The .org target must evaluate to constant + (optionally) a symbol
    // whose offset is known in the current layout.
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, Layout)) {
      getContext().reportError(OF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }

    uint64_t FragmentOffset = Layout.getFragmentOffset(&OF);
    int64_t TargetLocation = Value.getConstant();
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
      uint64_t Val;
      if (!Layout.getSymbolOffset(A->getSymbol(), Val)) {
        getContext().reportError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
    // The fragment's size is the gap from here to the target; reject
    // backwards motion and absurdly large (>= 1 GiB) gaps.
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      getContext().reportError(
          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                           "' (at offset '" + Twine(FragmentOffset) + "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  llvm_unreachable("invalid fragment kind");
}
379 | ||||
/// Lay out fragment \p F: assign its offset from its (already laid out)
/// predecessor and, when bundling is enabled, compute and record the
/// bundle padding that must precede it.
void MCAsmLayout::layoutFragment(MCFragment *F) {
  MCFragment *Prev = F->getPrevNode();

  // We should never try to recompute something which is valid.
  assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!");
  // We should never try to compute the fragment layout if its predecessor
  // isn't valid.
  assert((!Prev || isFragmentValid(Prev)) &&
         "Attempt to compute fragment before its predecessor!");

  ++stats::FragmentLayouts;

  // Compute fragment offset and size.
  if (Prev)
    F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev);
  else
    F->Offset = 0;
  LastValidFragment[F->getParent()] = F;

  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //
  //          BundlePadding
  //               |||
  // -------------------------------------
  //   Prev  |##########|       F        |
  // -------------------------------------
  //                    ^
  //                    |
  //                  F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // When the -mc-relax-all flag is used, we optimize bundling by writing the
  // padding directly into fragments when the instructions are emitted inside
  // the streamer. When the fragment is larger than the bundle size, we need to
  // ensure that it's bundle aligned. This means that if we end up with
  // multiple fragments, we must emit bundle padding between fragments.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments. We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
  //
  if (Assembler.isBundlingEnabled() && F->hasInstructions()) {
    assert(isa<MCEncodedFragment>(F) &&
           "Only MCEncodedFragment implementations have instructions");
    MCEncodedFragment *EF = cast<MCEncodedFragment>(F);
    uint64_t FSize = Assembler.computeFragmentSize(*this, *EF);

    if (!Assembler.getRelaxAll() && FSize > Assembler.getBundleAlignSize())
      report_fatal_error("Fragment can't be larger than a bundle size");

    uint64_t RequiredBundlePadding =
        computeBundlePadding(Assembler, EF, EF->Offset, FSize);
    // Padding is stored in a uint8_t, so it cannot exceed 255 bytes.
    if (RequiredBundlePadding > UINT8_MAX)
      report_fatal_error("Padding cannot exceed 255 bytes");
    EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
    EF->Offset += RequiredBundlePadding;
  }
}
443 | ||||
444 | void MCAssembler::registerSymbol(const MCSymbol &Symbol, bool *Created) { | |||
445 | bool New = !Symbol.isRegistered(); | |||
446 | if (Created) | |||
447 | *Created = New; | |||
448 | if (New) { | |||
449 | Symbol.setIsRegistered(true); | |||
450 | Symbols.push_back(&Symbol); | |||
451 | } | |||
452 | } | |||
453 | ||||
/// Emit the NOP bundle padding that must precede fragment \p EF (whose
/// computed size is \p FSize) to \p OS.  A no-op when the fragment has no
/// recorded bundle padding.
void MCAssembler::writeFragmentPadding(raw_ostream &OS,
                                       const MCEncodedFragment &EF,
                                       uint64_t FSize) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = EF.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(EF.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //             v--------------v   <- BundleAlignSize
      //        v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(OS, DistanceToBoundary))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    // Emit the (remaining) padding in one piece.
    if (!getBackend().writeNopData(OS, BundlePadding))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}
487 | ||||
488 | /// Write the fragment \p F to the output file. | |||
489 | static void writeFragment(raw_ostream &OS, const MCAssembler &Asm, | |||
490 | const MCAsmLayout &Layout, const MCFragment &F) { | |||
491 | // FIXME: Embed in fragments instead? | |||
492 | uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F); | |||
493 | ||||
494 | support::endianness Endian = Asm.getBackend().Endian; | |||
495 | ||||
496 | if (const MCEncodedFragment *EF
| |||
497 | Asm.writeFragmentPadding(OS, *EF, FragmentSize); | |||
498 | ||||
499 | // This variable (and its dummy usage) is to participate in the assert at | |||
500 | // the end of the function. | |||
501 | uint64_t Start = OS.tell(); | |||
502 | (void) Start; | |||
503 | ||||
504 | ++stats::EmittedFragments; | |||
505 | ||||
506 | switch (F.getKind()) { | |||
507 | case MCFragment::FT_Align: { | |||
508 | ++stats::EmittedAlignFragments; | |||
509 | const MCAlignFragment &AF = cast<MCAlignFragment>(F); | |||
510 | assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!")((AF.getValueSize() && "Invalid virtual align in concrete fragment!" ) ? static_cast<void> (0) : __assert_fail ("AF.getValueSize() && \"Invalid virtual align in concrete fragment!\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 510, __PRETTY_FUNCTION__)); | |||
511 | ||||
512 | uint64_t Count = FragmentSize / AF.getValueSize(); | |||
513 | ||||
514 | // FIXME: This error shouldn't actually occur (the front end should emit | |||
515 | // multiple .align directives to enforce the semantics it wants), but is | |||
516 | // severe enough that we want to report it. How to handle this? | |||
517 | if (Count * AF.getValueSize() != FragmentSize) | |||
518 | report_fatal_error("undefined .align directive, value size '" + | |||
519 | Twine(AF.getValueSize()) + | |||
520 | "' is not a divisor of padding size '" + | |||
521 | Twine(FragmentSize) + "'"); | |||
522 | ||||
523 | // See if we are aligning with nops, and if so do that first to try to fill | |||
524 | // the Count bytes. Then if that did not fill any bytes or there are any | |||
525 | // bytes left to fill use the Value and ValueSize to fill the rest. | |||
526 | // If we are aligning with nops, ask that target to emit the right data. | |||
527 | if (AF.hasEmitNops()) { | |||
528 | if (!Asm.getBackend().writeNopData(OS, Count)) | |||
529 | report_fatal_error("unable to write nop sequence of " + | |||
530 | Twine(Count) + " bytes"); | |||
531 | break; | |||
532 | } | |||
533 | ||||
534 | // Otherwise, write out in multiples of the value size. | |||
535 | for (uint64_t i = 0; i != Count; ++i) { | |||
536 | switch (AF.getValueSize()) { | |||
537 | default: llvm_unreachable("Invalid size!")::llvm::llvm_unreachable_internal("Invalid size!", "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 537); | |||
538 | case 1: OS << char(AF.getValue()); break; | |||
539 | case 2: | |||
540 | support::endian::write<uint16_t>(OS, AF.getValue(), Endian); | |||
541 | break; | |||
542 | case 4: | |||
543 | support::endian::write<uint32_t>(OS, AF.getValue(), Endian); | |||
544 | break; | |||
545 | case 8: | |||
546 | support::endian::write<uint64_t>(OS, AF.getValue(), Endian); | |||
547 | break; | |||
548 | } | |||
549 | } | |||
550 | break; | |||
551 | } | |||
552 | ||||
553 | case MCFragment::FT_Data: | |||
554 | ++stats::EmittedDataFragments; | |||
555 | OS << cast<MCDataFragment>(F).getContents(); | |||
556 | break; | |||
557 | ||||
558 | case MCFragment::FT_Relaxable: | |||
559 | ++stats::EmittedRelaxableFragments; | |||
560 | OS << cast<MCRelaxableFragment>(F).getContents(); | |||
561 | break; | |||
562 | ||||
563 | case MCFragment::FT_CompactEncodedInst: | |||
564 | ++stats::EmittedCompactEncodedInstFragments; | |||
565 | OS << cast<MCCompactEncodedInstFragment>(F).getContents(); | |||
566 | break; | |||
567 | ||||
568 | case MCFragment::FT_Fill: { | |||
569 | ++stats::EmittedFillFragments; | |||
570 | const MCFillFragment &FF = cast<MCFillFragment>(F); | |||
571 | uint64_t V = FF.getValue(); | |||
572 | unsigned VSize = FF.getValueSize(); | |||
573 | const unsigned MaxChunkSize = 16; | |||
574 | char Data[MaxChunkSize]; | |||
575 | // Duplicate V into Data as byte vector to reduce number of | |||
576 | // writes done. As such, do endian conversion here. | |||
577 | for (unsigned I = 0; I != VSize; ++I) { | |||
578 | unsigned index = Endian == support::little ? I : (VSize - I - 1); | |||
579 | Data[I] = uint8_t(V >> (index * 8)); | |||
580 | } | |||
581 | for (unsigned I = VSize; I < MaxChunkSize; ++I) | |||
582 | Data[I] = Data[I - VSize]; | |||
| ||||
583 | ||||
584 | // Set to largest multiple of VSize in Data. | |||
585 | const unsigned NumPerChunk = MaxChunkSize / VSize; | |||
586 | // Set ChunkSize to largest multiple of VSize in Data | |||
587 | const unsigned ChunkSize = VSize * NumPerChunk; | |||
588 | ||||
589 | // Do copies by chunk. | |||
590 | StringRef Ref(Data, ChunkSize); | |||
591 | for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I) | |||
592 | OS << Ref; | |||
593 | ||||
594 | // do remainder if needed. | |||
595 | unsigned TrailingCount = FragmentSize % ChunkSize; | |||
596 | if (TrailingCount) | |||
597 | OS.write(Data, TrailingCount); | |||
598 | break; | |||
599 | } | |||
600 | ||||
601 | case MCFragment::FT_LEB: { | |||
602 | const MCLEBFragment &LF = cast<MCLEBFragment>(F); | |||
603 | OS << LF.getContents(); | |||
604 | break; | |||
605 | } | |||
606 | ||||
607 | case MCFragment::FT_BoundaryAlign: { | |||
608 | if (!Asm.getBackend().writeNopData(OS, FragmentSize)) | |||
609 | report_fatal_error("unable to write nop sequence of " + | |||
610 | Twine(FragmentSize) + " bytes"); | |||
611 | break; | |||
612 | } | |||
613 | ||||
614 | case MCFragment::FT_SymbolId: { | |||
615 | const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F); | |||
616 | support::endian::write<uint32_t>(OS, SF.getSymbol()->getIndex(), Endian); | |||
617 | break; | |||
618 | } | |||
619 | ||||
620 | case MCFragment::FT_Org: { | |||
621 | ++stats::EmittedOrgFragments; | |||
622 | const MCOrgFragment &OF = cast<MCOrgFragment>(F); | |||
623 | ||||
624 | for (uint64_t i = 0, e = FragmentSize; i != e; ++i) | |||
625 | OS << char(OF.getValue()); | |||
626 | ||||
627 | break; | |||
628 | } | |||
629 | ||||
630 | case MCFragment::FT_Dwarf: { | |||
631 | const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F); | |||
632 | OS << OF.getContents(); | |||
633 | break; | |||
634 | } | |||
635 | case MCFragment::FT_DwarfFrame: { | |||
636 | const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F); | |||
637 | OS << CF.getContents(); | |||
638 | break; | |||
639 | } | |||
640 | case MCFragment::FT_CVInlineLines: { | |||
641 | const auto &OF = cast<MCCVInlineLineTableFragment>(F); | |||
642 | OS << OF.getContents(); | |||
643 | break; | |||
644 | } | |||
645 | case MCFragment::FT_CVDefRange: { | |||
646 | const auto &DRF = cast<MCCVDefRangeFragment>(F); | |||
647 | OS << DRF.getContents(); | |||
648 | break; | |||
649 | } | |||
650 | case MCFragment::FT_Dummy: | |||
651 | llvm_unreachable("Should not have been added")::llvm::llvm_unreachable_internal("Should not have been added" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 651); | |||
652 | } | |||
653 | ||||
654 | assert(OS.tell() - Start == FragmentSize &&((OS.tell() - Start == FragmentSize && "The stream should advance by fragment size" ) ? static_cast<void> (0) : __assert_fail ("OS.tell() - Start == FragmentSize && \"The stream should advance by fragment size\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 655, __PRETTY_FUNCTION__)) | |||
655 | "The stream should advance by fragment size")((OS.tell() - Start == FragmentSize && "The stream should advance by fragment size" ) ? static_cast<void> (0) : __assert_fail ("OS.tell() - Start == FragmentSize && \"The stream should advance by fragment size\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 655, __PRETTY_FUNCTION__)); | |||
656 | } | |||
657 | ||||
658 | void MCAssembler::writeSectionData(raw_ostream &OS, const MCSection *Sec, | |||
659 | const MCAsmLayout &Layout) const { | |||
660 | assert(getBackendPtr() && "Expected assembler backend")((getBackendPtr() && "Expected assembler backend") ? static_cast <void> (0) : __assert_fail ("getBackendPtr() && \"Expected assembler backend\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 660, __PRETTY_FUNCTION__)); | |||
| ||||
661 | ||||
662 | // Ignore virtual sections. | |||
663 | if (Sec->isVirtualSection()) { | |||
664 | assert(Layout.getSectionFileSize(Sec) == 0 && "Invalid size for section!")((Layout.getSectionFileSize(Sec) == 0 && "Invalid size for section!" ) ? static_cast<void> (0) : __assert_fail ("Layout.getSectionFileSize(Sec) == 0 && \"Invalid size for section!\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 664, __PRETTY_FUNCTION__)); | |||
665 | ||||
666 | // Check that contents are only things legal inside a virtual section. | |||
667 | for (const MCFragment &F : *Sec) { | |||
668 | switch (F.getKind()) { | |||
669 | default: llvm_unreachable("Invalid fragment in virtual section!")::llvm::llvm_unreachable_internal("Invalid fragment in virtual section!" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 669); | |||
670 | case MCFragment::FT_Data: { | |||
671 | // Check that we aren't trying to write a non-zero contents (or fixups) | |||
672 | // into a virtual section. This is to support clients which use standard | |||
673 | // directives to fill the contents of virtual sections. | |||
674 | const MCDataFragment &DF = cast<MCDataFragment>(F); | |||
675 | if (DF.fixup_begin() != DF.fixup_end()) | |||
676 | report_fatal_error("cannot have fixups in virtual section!"); | |||
677 | for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i) | |||
678 | if (DF.getContents()[i]) { | |||
679 | if (auto *ELFSec = dyn_cast<const MCSectionELF>(Sec)) | |||
680 | report_fatal_error("non-zero initializer found in section '" + | |||
681 | ELFSec->getSectionName() + "'"); | |||
682 | else | |||
683 | report_fatal_error("non-zero initializer found in virtual section"); | |||
684 | } | |||
685 | break; | |||
686 | } | |||
687 | case MCFragment::FT_Align: | |||
688 | // Check that we aren't trying to write a non-zero value into a virtual | |||
689 | // section. | |||
690 | assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||(((cast<MCAlignFragment>(F).getValueSize() == 0 || cast <MCAlignFragment>(F).getValue() == 0) && "Invalid align in virtual section!" ) ? static_cast<void> (0) : __assert_fail ("(cast<MCAlignFragment>(F).getValueSize() == 0 || cast<MCAlignFragment>(F).getValue() == 0) && \"Invalid align in virtual section!\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 692, __PRETTY_FUNCTION__)) | |||
691 | cast<MCAlignFragment>(F).getValue() == 0) &&(((cast<MCAlignFragment>(F).getValueSize() == 0 || cast <MCAlignFragment>(F).getValue() == 0) && "Invalid align in virtual section!" ) ? static_cast<void> (0) : __assert_fail ("(cast<MCAlignFragment>(F).getValueSize() == 0 || cast<MCAlignFragment>(F).getValue() == 0) && \"Invalid align in virtual section!\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 692, __PRETTY_FUNCTION__)) | |||
692 | "Invalid align in virtual section!")(((cast<MCAlignFragment>(F).getValueSize() == 0 || cast <MCAlignFragment>(F).getValue() == 0) && "Invalid align in virtual section!" ) ? static_cast<void> (0) : __assert_fail ("(cast<MCAlignFragment>(F).getValueSize() == 0 || cast<MCAlignFragment>(F).getValue() == 0) && \"Invalid align in virtual section!\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 692, __PRETTY_FUNCTION__)); | |||
693 | break; | |||
694 | case MCFragment::FT_Fill: | |||
695 | assert((cast<MCFillFragment>(F).getValue() == 0) &&(((cast<MCFillFragment>(F).getValue() == 0) && "Invalid fill in virtual section!" ) ? static_cast<void> (0) : __assert_fail ("(cast<MCFillFragment>(F).getValue() == 0) && \"Invalid fill in virtual section!\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 696, __PRETTY_FUNCTION__)) | |||
696 | "Invalid fill in virtual section!")(((cast<MCFillFragment>(F).getValue() == 0) && "Invalid fill in virtual section!" ) ? static_cast<void> (0) : __assert_fail ("(cast<MCFillFragment>(F).getValue() == 0) && \"Invalid fill in virtual section!\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 696, __PRETTY_FUNCTION__)); | |||
697 | break; | |||
698 | } | |||
699 | } | |||
700 | ||||
701 | return; | |||
702 | } | |||
703 | ||||
704 | uint64_t Start = OS.tell(); | |||
705 | (void)Start; | |||
706 | ||||
707 | for (const MCFragment &F : *Sec) | |||
708 | writeFragment(OS, *this, Layout, F); | |||
709 | ||||
710 | assert(OS.tell() - Start == Layout.getSectionAddressSize(Sec))((OS.tell() - Start == Layout.getSectionAddressSize(Sec)) ? static_cast <void> (0) : __assert_fail ("OS.tell() - Start == Layout.getSectionAddressSize(Sec)" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 710, __PRETTY_FUNCTION__)); | |||
711 | } | |||
712 | ||||
713 | std::tuple<MCValue, uint64_t, bool> | |||
714 | MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F, | |||
715 | const MCFixup &Fixup) { | |||
716 | // Evaluate the fixup. | |||
717 | MCValue Target; | |||
718 | uint64_t FixedValue; | |||
719 | bool WasForced; | |||
720 | bool IsResolved = evaluateFixup(Layout, Fixup, &F, Target, FixedValue, | |||
721 | WasForced); | |||
722 | if (!IsResolved) { | |||
723 | // The fixup was unresolved, we need a relocation. Inform the object | |||
724 | // writer of the relocation, and give it an opportunity to adjust the | |||
725 | // fixup value if need be. | |||
726 | if (Target.getSymA() && Target.getSymB() && | |||
727 | getBackend().requiresDiffExpressionRelocations()) { | |||
728 | // The fixup represents the difference between two symbols, which the | |||
729 | // backend has indicated must be resolved at link time. Split up the fixup | |||
730 | // into two relocations, one for the add, and one for the sub, and emit | |||
731 | // both of these. The constant will be associated with the add half of the | |||
732 | // expression. | |||
733 | MCFixup FixupAdd = MCFixup::createAddFor(Fixup); | |||
734 | MCValue TargetAdd = | |||
735 | MCValue::get(Target.getSymA(), nullptr, Target.getConstant()); | |||
736 | getWriter().recordRelocation(*this, Layout, &F, FixupAdd, TargetAdd, | |||
737 | FixedValue); | |||
738 | MCFixup FixupSub = MCFixup::createSubFor(Fixup); | |||
739 | MCValue TargetSub = MCValue::get(Target.getSymB()); | |||
740 | getWriter().recordRelocation(*this, Layout, &F, FixupSub, TargetSub, | |||
741 | FixedValue); | |||
742 | } else { | |||
743 | getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, | |||
744 | FixedValue); | |||
745 | } | |||
746 | } | |||
747 | return std::make_tuple(Target, FixedValue, IsResolved); | |||
748 | } | |||
749 | ||||
750 | void MCAssembler::layout(MCAsmLayout &Layout) { | |||
751 | assert(getBackendPtr() && "Expected assembler backend")((getBackendPtr() && "Expected assembler backend") ? static_cast <void> (0) : __assert_fail ("getBackendPtr() && \"Expected assembler backend\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 751, __PRETTY_FUNCTION__)); | |||
752 | DEBUG_WITH_TYPE("mc-dump", {do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("mc-dump")) { { errs() << "assembler backend - pre-layout\n--\n" ; dump(); }; } } while (false) | |||
753 | errs() << "assembler backend - pre-layout\n--\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("mc-dump")) { { errs() << "assembler backend - pre-layout\n--\n" ; dump(); }; } } while (false) | |||
754 | dump(); })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("mc-dump")) { { errs() << "assembler backend - pre-layout\n--\n" ; dump(); }; } } while (false); | |||
755 | ||||
756 | // Create dummy fragments and assign section ordinals. | |||
757 | unsigned SectionIndex = 0; | |||
758 | for (MCSection &Sec : *this) { | |||
759 | // Create dummy fragments to eliminate any empty sections, this simplifies | |||
760 | // layout. | |||
761 | if (Sec.getFragmentList().empty()) | |||
762 | new MCDataFragment(&Sec); | |||
763 | ||||
764 | Sec.setOrdinal(SectionIndex++); | |||
765 | } | |||
766 | ||||
767 | // Assign layout order indices to sections and fragments. | |||
768 | for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) { | |||
769 | MCSection *Sec = Layout.getSectionOrder()[i]; | |||
770 | Sec->setLayoutOrder(i); | |||
771 | ||||
772 | unsigned FragmentIndex = 0; | |||
773 | for (MCFragment &Frag : *Sec) | |||
774 | Frag.setLayoutOrder(FragmentIndex++); | |||
775 | } | |||
776 | ||||
777 | // Layout until everything fits. | |||
778 | while (layoutOnce(Layout)) | |||
779 | if (getContext().hadError()) | |||
780 | return; | |||
781 | ||||
782 | DEBUG_WITH_TYPE("mc-dump", {do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("mc-dump")) { { errs() << "assembler backend - post-relaxation\n--\n" ; dump(); }; } } while (false) | |||
783 | errs() << "assembler backend - post-relaxation\n--\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("mc-dump")) { { errs() << "assembler backend - post-relaxation\n--\n" ; dump(); }; } } while (false) | |||
784 | dump(); })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("mc-dump")) { { errs() << "assembler backend - post-relaxation\n--\n" ; dump(); }; } } while (false); | |||
785 | ||||
786 | // Finalize the layout, including fragment lowering. | |||
787 | finishLayout(Layout); | |||
788 | ||||
789 | DEBUG_WITH_TYPE("mc-dump", {do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("mc-dump")) { { errs() << "assembler backend - final-layout\n--\n" ; dump(); }; } } while (false) | |||
790 | errs() << "assembler backend - final-layout\n--\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("mc-dump")) { { errs() << "assembler backend - final-layout\n--\n" ; dump(); }; } } while (false) | |||
791 | dump(); })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("mc-dump")) { { errs() << "assembler backend - final-layout\n--\n" ; dump(); }; } } while (false); | |||
792 | ||||
793 | // Allow the object writer a chance to perform post-layout binding (for | |||
794 | // example, to set the index fields in the symbol data). | |||
795 | getWriter().executePostLayoutBinding(*this, Layout); | |||
796 | ||||
797 | // Evaluate and apply the fixups, generating relocation entries as necessary. | |||
798 | for (MCSection &Sec : *this) { | |||
799 | for (MCFragment &Frag : Sec) { | |||
800 | // Data and relaxable fragments both have fixups. So only process | |||
801 | // those here. | |||
802 | // FIXME: Is there a better way to do this? MCEncodedFragmentWithFixups | |||
803 | // being templated makes this tricky. | |||
804 | if (isa<MCEncodedFragment>(&Frag) && | |||
805 | isa<MCCompactEncodedInstFragment>(&Frag)) | |||
806 | continue; | |||
807 | if (!isa<MCEncodedFragment>(&Frag) && !isa<MCCVDefRangeFragment>(&Frag) && | |||
808 | !isa<MCAlignFragment>(&Frag)) | |||
809 | continue; | |||
810 | ArrayRef<MCFixup> Fixups; | |||
811 | MutableArrayRef<char> Contents; | |||
812 | const MCSubtargetInfo *STI = nullptr; | |||
813 | if (auto *FragWithFixups = dyn_cast<MCDataFragment>(&Frag)) { | |||
814 | Fixups = FragWithFixups->getFixups(); | |||
815 | Contents = FragWithFixups->getContents(); | |||
816 | STI = FragWithFixups->getSubtargetInfo(); | |||
817 | assert(!FragWithFixups->hasInstructions() || STI != nullptr)((!FragWithFixups->hasInstructions() || STI != nullptr) ? static_cast <void> (0) : __assert_fail ("!FragWithFixups->hasInstructions() || STI != nullptr" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 817, __PRETTY_FUNCTION__)); | |||
818 | } else if (auto *FragWithFixups = dyn_cast<MCRelaxableFragment>(&Frag)) { | |||
819 | Fixups = FragWithFixups->getFixups(); | |||
820 | Contents = FragWithFixups->getContents(); | |||
821 | STI = FragWithFixups->getSubtargetInfo(); | |||
822 | assert(!FragWithFixups->hasInstructions() || STI != nullptr)((!FragWithFixups->hasInstructions() || STI != nullptr) ? static_cast <void> (0) : __assert_fail ("!FragWithFixups->hasInstructions() || STI != nullptr" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 822, __PRETTY_FUNCTION__)); | |||
823 | } else if (auto *FragWithFixups = dyn_cast<MCCVDefRangeFragment>(&Frag)) { | |||
824 | Fixups = FragWithFixups->getFixups(); | |||
825 | Contents = FragWithFixups->getContents(); | |||
826 | } else if (auto *FragWithFixups = dyn_cast<MCDwarfLineAddrFragment>(&Frag)) { | |||
827 | Fixups = FragWithFixups->getFixups(); | |||
828 | Contents = FragWithFixups->getContents(); | |||
829 | } else if (auto *AF = dyn_cast<MCAlignFragment>(&Frag)) { | |||
830 | // Insert fixup type for code alignment if the target define | |||
831 | // shouldInsertFixupForCodeAlign target hook. | |||
832 | if (Sec.UseCodeAlign() && AF->hasEmitNops()) { | |||
833 | getBackend().shouldInsertFixupForCodeAlign(*this, Layout, *AF); | |||
834 | } | |||
835 | continue; | |||
836 | } else if (auto *FragWithFixups = | |||
837 | dyn_cast<MCDwarfCallFrameFragment>(&Frag)) { | |||
838 | Fixups = FragWithFixups->getFixups(); | |||
839 | Contents = FragWithFixups->getContents(); | |||
840 | } else | |||
841 | llvm_unreachable("Unknown fragment with fixups!")::llvm::llvm_unreachable_internal("Unknown fragment with fixups!" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 841); | |||
842 | for (const MCFixup &Fixup : Fixups) { | |||
843 | uint64_t FixedValue; | |||
844 | bool IsResolved; | |||
845 | MCValue Target; | |||
846 | std::tie(Target, FixedValue, IsResolved) = | |||
847 | handleFixup(Layout, Frag, Fixup); | |||
848 | getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue, | |||
849 | IsResolved, STI); | |||
850 | } | |||
851 | } | |||
852 | } | |||
853 | } | |||
854 | ||||
855 | void MCAssembler::Finish() { | |||
856 | // Create the layout object. | |||
857 | MCAsmLayout Layout(*this); | |||
858 | layout(Layout); | |||
859 | ||||
860 | // Write the object file. | |||
861 | stats::ObjectBytes += getWriter().writeObject(*this, Layout); | |||
862 | } | |||
863 | ||||
864 | bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup, | |||
865 | const MCRelaxableFragment *DF, | |||
866 | const MCAsmLayout &Layout) const { | |||
867 | assert(getBackendPtr() && "Expected assembler backend")((getBackendPtr() && "Expected assembler backend") ? static_cast <void> (0) : __assert_fail ("getBackendPtr() && \"Expected assembler backend\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 867, __PRETTY_FUNCTION__)); | |||
868 | MCValue Target; | |||
869 | uint64_t Value; | |||
870 | bool WasForced; | |||
871 | bool Resolved = evaluateFixup(Layout, Fixup, DF, Target, Value, WasForced); | |||
872 | if (Target.getSymA() && | |||
873 | Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 && | |||
874 | Fixup.getKind() == FK_Data_1) | |||
875 | return false; | |||
876 | return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Resolved, Value, DF, | |||
877 | Layout, WasForced); | |||
878 | } | |||
879 | ||||
880 | bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F, | |||
881 | const MCAsmLayout &Layout) const { | |||
882 | assert(getBackendPtr() && "Expected assembler backend")((getBackendPtr() && "Expected assembler backend") ? static_cast <void> (0) : __assert_fail ("getBackendPtr() && \"Expected assembler backend\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 882, __PRETTY_FUNCTION__)); | |||
883 | // If this inst doesn't ever need relaxation, ignore it. This occurs when we | |||
884 | // are intentionally pushing out inst fragments, or because we relaxed a | |||
885 | // previous instruction to one that doesn't need relaxation. | |||
886 | if (!getBackend().mayNeedRelaxation(F->getInst(), *F->getSubtargetInfo())) | |||
887 | return false; | |||
888 | ||||
889 | for (const MCFixup &Fixup : F->getFixups()) | |||
890 | if (fixupNeedsRelaxation(Fixup, F, Layout)) | |||
891 | return true; | |||
892 | ||||
893 | return false; | |||
894 | } | |||
895 | ||||
896 | bool MCAssembler::relaxInstruction(MCAsmLayout &Layout, | |||
897 | MCRelaxableFragment &F) { | |||
898 | assert(getEmitterPtr() &&((getEmitterPtr() && "Expected CodeEmitter defined for relaxInstruction" ) ? static_cast<void> (0) : __assert_fail ("getEmitterPtr() && \"Expected CodeEmitter defined for relaxInstruction\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 899, __PRETTY_FUNCTION__)) | |||
899 | "Expected CodeEmitter defined for relaxInstruction")((getEmitterPtr() && "Expected CodeEmitter defined for relaxInstruction" ) ? static_cast<void> (0) : __assert_fail ("getEmitterPtr() && \"Expected CodeEmitter defined for relaxInstruction\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 899, __PRETTY_FUNCTION__)); | |||
900 | if (!fragmentNeedsRelaxation(&F, Layout)) | |||
901 | return false; | |||
902 | ||||
903 | ++stats::RelaxedInstructions; | |||
904 | ||||
905 | // FIXME-PERF: We could immediately lower out instructions if we can tell | |||
906 | // they are fully resolved, to avoid retesting on later passes. | |||
907 | ||||
908 | // Relax the fragment. | |||
909 | ||||
910 | MCInst Relaxed; | |||
911 | getBackend().relaxInstruction(F.getInst(), *F.getSubtargetInfo(), Relaxed); | |||
912 | ||||
913 | // Encode the new instruction. | |||
914 | // | |||
915 | // FIXME-PERF: If it matters, we could let the target do this. It can | |||
916 | // probably do so more efficiently in many cases. | |||
917 | SmallVector<MCFixup, 4> Fixups; | |||
918 | SmallString<256> Code; | |||
919 | raw_svector_ostream VecOS(Code); | |||
920 | getEmitter().encodeInstruction(Relaxed, VecOS, Fixups, *F.getSubtargetInfo()); | |||
921 | ||||
922 | // Update the fragment. | |||
923 | F.setInst(Relaxed); | |||
924 | F.getContents() = Code; | |||
925 | F.getFixups() = Fixups; | |||
926 | ||||
927 | return true; | |||
928 | } | |||
929 | ||||
930 | bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) { | |||
931 | uint64_t OldSize = LF.getContents().size(); | |||
932 | int64_t Value; | |||
933 | bool Abs = LF.getValue().evaluateKnownAbsolute(Value, Layout); | |||
934 | if (!Abs) | |||
935 | report_fatal_error("sleb128 and uleb128 expressions must be absolute"); | |||
936 | SmallString<8> &Data = LF.getContents(); | |||
937 | Data.clear(); | |||
938 | raw_svector_ostream OSE(Data); | |||
939 | // The compiler can generate EH table assembly that is impossible to assemble | |||
940 | // without either adding padding to an LEB fragment or adding extra padding | |||
941 | // to a later alignment fragment. To accommodate such tables, relaxation can | |||
942 | // only increase an LEB fragment size here, not decrease it. See PR35809. | |||
943 | if (LF.isSigned()) | |||
944 | encodeSLEB128(Value, OSE, OldSize); | |||
945 | else | |||
946 | encodeULEB128(Value, OSE, OldSize); | |||
947 | return OldSize != LF.getContents().size(); | |||
948 | } | |||
949 | ||||
950 | /// Check if the branch crosses the boundary. | |||
951 | /// | |||
952 | /// \param StartAddr start address of the fused/unfused branch. | |||
953 | /// \param Size size of the fused/unfused branch. | |||
954 | /// \param BoundaryAlignment alignment requirement of the branch. | |||
955 | /// \returns true if the branch cross the boundary. | |||
956 | static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size, | |||
957 | Align BoundaryAlignment) { | |||
958 | uint64_t EndAddr = StartAddr + Size; | |||
959 | return (StartAddr >> Log2(BoundaryAlignment)) != | |||
960 | ((EndAddr - 1) >> Log2(BoundaryAlignment)); | |||
961 | } | |||
962 | ||||
963 | /// Check if the branch is against the boundary. | |||
964 | /// | |||
965 | /// \param StartAddr start address of the fused/unfused branch. | |||
966 | /// \param Size size of the fused/unfused branch. | |||
967 | /// \param BoundaryAlignment alignment requirement of the branch. | |||
968 | /// \returns true if the branch is against the boundary. | |||
969 | static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size, | |||
970 | Align BoundaryAlignment) { | |||
971 | uint64_t EndAddr = StartAddr + Size; | |||
972 | return (EndAddr & (BoundaryAlignment.value() - 1)) == 0; | |||
973 | } | |||
974 | ||||
975 | /// Check if the branch needs padding. | |||
976 | /// | |||
977 | /// \param StartAddr start address of the fused/unfused branch. | |||
978 | /// \param Size size of the fused/unfused branch. | |||
979 | /// \param BoundaryAlignment alignment requirement of the branch. | |||
980 | /// \returns true if the branch needs padding. | |||
981 | static bool needPadding(uint64_t StartAddr, uint64_t Size, | |||
982 | Align BoundaryAlignment) { | |||
983 | return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) || | |||
984 | isAgainstBoundary(StartAddr, Size, BoundaryAlignment); | |||
985 | } | |||
986 | ||||
987 | bool MCAssembler::relaxBoundaryAlign(MCAsmLayout &Layout, | |||
988 | MCBoundaryAlignFragment &BF) { | |||
989 | // The MCBoundaryAlignFragment that doesn't emit NOP should not be relaxed. | |||
990 | if (!BF.canEmitNops()) | |||
991 | return false; | |||
992 | ||||
993 | uint64_t AlignedOffset = Layout.getFragmentOffset(BF.getNextNode()); | |||
994 | uint64_t AlignedSize = 0; | |||
995 | const MCFragment *F = BF.getNextNode(); | |||
996 | // If the branch is unfused, it is emitted into one fragment, otherwise it is | |||
997 | // emitted into two fragments at most, the next MCBoundaryAlignFragment(if | |||
998 | // exists) also marks the end of the branch. | |||
999 | for (auto i = 0, N = BF.isFused() ? 2 : 1; | |||
1000 | i != N && !isa<MCBoundaryAlignFragment>(F); ++i, F = F->getNextNode()) { | |||
1001 | AlignedSize += computeFragmentSize(Layout, *F); | |||
1002 | } | |||
1003 | uint64_t OldSize = BF.getSize(); | |||
1004 | AlignedOffset -= OldSize; | |||
1005 | Align BoundaryAlignment = BF.getAlignment(); | |||
1006 | uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment) | |||
1007 | ? offsetToAlignment(AlignedOffset, BoundaryAlignment) | |||
1008 | : 0U; | |||
1009 | if (NewSize == OldSize) | |||
1010 | return false; | |||
1011 | BF.setSize(NewSize); | |||
1012 | Layout.invalidateFragmentsFrom(&BF); | |||
1013 | return true; | |||
1014 | } | |||
1015 | ||||
1016 | bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout, | |||
1017 | MCDwarfLineAddrFragment &DF) { | |||
1018 | MCContext &Context = Layout.getAssembler().getContext(); | |||
1019 | uint64_t OldSize = DF.getContents().size(); | |||
1020 | int64_t AddrDelta; | |||
1021 | bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout); | |||
1022 | assert(Abs && "We created a line delta with an invalid expression")((Abs && "We created a line delta with an invalid expression" ) ? static_cast<void> (0) : __assert_fail ("Abs && \"We created a line delta with an invalid expression\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 1022, __PRETTY_FUNCTION__)); | |||
1023 | (void)Abs; | |||
1024 | int64_t LineDelta; | |||
1025 | LineDelta = DF.getLineDelta(); | |||
1026 | SmallVectorImpl<char> &Data = DF.getContents(); | |||
1027 | Data.clear(); | |||
1028 | raw_svector_ostream OSE(Data); | |||
1029 | DF.getFixups().clear(); | |||
1030 | ||||
1031 | if (!getBackend().requiresDiffExpressionRelocations()) { | |||
1032 | MCDwarfLineAddr::Encode(Context, getDWARFLinetableParams(), LineDelta, | |||
1033 | AddrDelta, OSE); | |||
1034 | } else { | |||
1035 | uint32_t Offset; | |||
1036 | uint32_t Size; | |||
1037 | bool SetDelta = MCDwarfLineAddr::FixedEncode(Context, | |||
1038 | getDWARFLinetableParams(), | |||
1039 | LineDelta, AddrDelta, | |||
1040 | OSE, &Offset, &Size); | |||
1041 | // Add Fixups for address delta or new address. | |||
1042 | const MCExpr *FixupExpr; | |||
1043 | if (SetDelta) { | |||
1044 | FixupExpr = &DF.getAddrDelta(); | |||
1045 | } else { | |||
1046 | const MCBinaryExpr *ABE = cast<MCBinaryExpr>(&DF.getAddrDelta()); | |||
1047 | FixupExpr = ABE->getLHS(); | |||
1048 | } | |||
1049 | DF.getFixups().push_back( | |||
1050 | MCFixup::create(Offset, FixupExpr, | |||
1051 | MCFixup::getKindForSize(Size, false /*isPCRel*/))); | |||
1052 | } | |||
1053 | ||||
1054 | return OldSize != Data.size(); | |||
1055 | } | |||
1056 | ||||
1057 | bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout, | |||
1058 | MCDwarfCallFrameFragment &DF) { | |||
1059 | MCContext &Context = Layout.getAssembler().getContext(); | |||
1060 | uint64_t OldSize = DF.getContents().size(); | |||
1061 | int64_t AddrDelta; | |||
1062 | bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout); | |||
1063 | assert(Abs && "We created call frame with an invalid expression")((Abs && "We created call frame with an invalid expression" ) ? static_cast<void> (0) : __assert_fail ("Abs && \"We created call frame with an invalid expression\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 1063, __PRETTY_FUNCTION__)); | |||
1064 | (void) Abs; | |||
1065 | SmallVectorImpl<char> &Data = DF.getContents(); | |||
1066 | Data.clear(); | |||
1067 | raw_svector_ostream OSE(Data); | |||
1068 | DF.getFixups().clear(); | |||
1069 | ||||
1070 | if (getBackend().requiresDiffExpressionRelocations()) { | |||
1071 | uint32_t Offset; | |||
1072 | uint32_t Size; | |||
1073 | MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE, &Offset, | |||
1074 | &Size); | |||
1075 | if (Size) { | |||
1076 | DF.getFixups().push_back(MCFixup::create( | |||
1077 | Offset, &DF.getAddrDelta(), | |||
1078 | MCFixup::getKindForSizeInBits(Size /*In bits.*/, false /*isPCRel*/))); | |||
1079 | } | |||
1080 | } else { | |||
1081 | MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE); | |||
1082 | } | |||
1083 | ||||
1084 | return OldSize != Data.size(); | |||
1085 | } | |||
1086 | ||||
1087 | bool MCAssembler::relaxCVInlineLineTable(MCAsmLayout &Layout, | |||
1088 | MCCVInlineLineTableFragment &F) { | |||
1089 | unsigned OldSize = F.getContents().size(); | |||
1090 | getContext().getCVContext().encodeInlineLineTable(Layout, F); | |||
1091 | return OldSize != F.getContents().size(); | |||
1092 | } | |||
1093 | ||||
1094 | bool MCAssembler::relaxCVDefRange(MCAsmLayout &Layout, | |||
1095 | MCCVDefRangeFragment &F) { | |||
1096 | unsigned OldSize = F.getContents().size(); | |||
1097 | getContext().getCVContext().encodeDefRange(Layout, F); | |||
1098 | return OldSize != F.getContents().size(); | |||
1099 | } | |||
1100 | ||||
1101 | bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec) { | |||
1102 | // Holds the first fragment which needed relaxing during this layout. It will | |||
1103 | // remain NULL if none were relaxed. | |||
1104 | // When a fragment is relaxed, all the fragments following it should get | |||
1105 | // invalidated because their offset is going to change. | |||
1106 | MCFragment *FirstRelaxedFragment = nullptr; | |||
1107 | ||||
1108 | // Attempt to relax all the fragments in the section. | |||
1109 | for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) { | |||
1110 | // Check if this is a fragment that needs relaxation. | |||
1111 | bool RelaxedFrag = false; | |||
1112 | switch(I->getKind()) { | |||
1113 | default: | |||
1114 | break; | |||
1115 | case MCFragment::FT_Relaxable: | |||
1116 | assert(!getRelaxAll() &&((!getRelaxAll() && "Did not expect a MCRelaxableFragment in RelaxAll mode" ) ? static_cast<void> (0) : __assert_fail ("!getRelaxAll() && \"Did not expect a MCRelaxableFragment in RelaxAll mode\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 1117, __PRETTY_FUNCTION__)) | |||
1117 | "Did not expect a MCRelaxableFragment in RelaxAll mode")((!getRelaxAll() && "Did not expect a MCRelaxableFragment in RelaxAll mode" ) ? static_cast<void> (0) : __assert_fail ("!getRelaxAll() && \"Did not expect a MCRelaxableFragment in RelaxAll mode\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 1117, __PRETTY_FUNCTION__)); | |||
1118 | RelaxedFrag = relaxInstruction(Layout, *cast<MCRelaxableFragment>(I)); | |||
1119 | break; | |||
1120 | case MCFragment::FT_Dwarf: | |||
1121 | RelaxedFrag = relaxDwarfLineAddr(Layout, | |||
1122 | *cast<MCDwarfLineAddrFragment>(I)); | |||
1123 | break; | |||
1124 | case MCFragment::FT_DwarfFrame: | |||
1125 | RelaxedFrag = | |||
1126 | relaxDwarfCallFrameFragment(Layout, | |||
1127 | *cast<MCDwarfCallFrameFragment>(I)); | |||
1128 | break; | |||
1129 | case MCFragment::FT_LEB: | |||
1130 | RelaxedFrag = relaxLEB(Layout, *cast<MCLEBFragment>(I)); | |||
1131 | break; | |||
1132 | case MCFragment::FT_BoundaryAlign: | |||
1133 | RelaxedFrag = | |||
1134 | relaxBoundaryAlign(Layout, *cast<MCBoundaryAlignFragment>(I)); | |||
1135 | break; | |||
1136 | case MCFragment::FT_CVInlineLines: | |||
1137 | RelaxedFrag = | |||
1138 | relaxCVInlineLineTable(Layout, *cast<MCCVInlineLineTableFragment>(I)); | |||
1139 | break; | |||
1140 | case MCFragment::FT_CVDefRange: | |||
1141 | RelaxedFrag = relaxCVDefRange(Layout, *cast<MCCVDefRangeFragment>(I)); | |||
1142 | break; | |||
1143 | } | |||
1144 | if (RelaxedFrag && !FirstRelaxedFragment) | |||
1145 | FirstRelaxedFragment = &*I; | |||
1146 | } | |||
1147 | if (FirstRelaxedFragment) { | |||
1148 | Layout.invalidateFragmentsFrom(FirstRelaxedFragment); | |||
1149 | return true; | |||
1150 | } | |||
1151 | return false; | |||
1152 | } | |||
1153 | ||||
1154 | bool MCAssembler::layoutOnce(MCAsmLayout &Layout) { | |||
1155 | ++stats::RelaxationSteps; | |||
1156 | ||||
1157 | bool WasRelaxed = false; | |||
1158 | for (iterator it = begin(), ie = end(); it != ie; ++it) { | |||
1159 | MCSection &Sec = *it; | |||
1160 | while (layoutSectionOnce(Layout, Sec)) | |||
1161 | WasRelaxed = true; | |||
1162 | } | |||
1163 | ||||
1164 | return WasRelaxed; | |||
1165 | } | |||
1166 | ||||
1167 | void MCAssembler::finishLayout(MCAsmLayout &Layout) { | |||
1168 | assert(getBackendPtr() && "Expected assembler backend")((getBackendPtr() && "Expected assembler backend") ? static_cast <void> (0) : __assert_fail ("getBackendPtr() && \"Expected assembler backend\"" , "/build/llvm-toolchain-snapshot-10~++20200107111111+051c4d5b7bc/llvm/lib/MC/MCAssembler.cpp" , 1168, __PRETTY_FUNCTION__)); | |||
1169 | // The layout is done. Mark every fragment as valid. | |||
1170 | for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) { | |||
1171 | MCSection &Section = *Layout.getSectionOrder()[i]; | |||
1172 | Layout.getFragmentOffset(&*Section.getFragmentList().rbegin()); | |||
1173 | computeFragmentSize(Layout, *Section.getFragmentList().rbegin()); | |||
1174 | } | |||
1175 | getBackend().finishLayout(*this, Layout); | |||
1176 | } | |||
1177 | ||||
1178 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
1179 | LLVM_DUMP_METHOD__attribute__((noinline)) __attribute__((__used__)) void MCAssembler::dump() const{ | |||
1180 | raw_ostream &OS = errs(); | |||
1181 | ||||
1182 | OS << "<MCAssembler\n"; | |||
1183 | OS << " Sections:[\n "; | |||
1184 | for (const_iterator it = begin(), ie = end(); it != ie; ++it) { | |||
1185 | if (it != begin()) OS << ",\n "; | |||
1186 | it->dump(); | |||
1187 | } | |||
1188 | OS << "],\n"; | |||
1189 | OS << " Symbols:["; | |||
1190 | ||||
1191 | for (const_symbol_iterator it = symbol_begin(), ie = symbol_end(); it != ie; ++it) { | |||
1192 | if (it != symbol_begin()) OS << ",\n "; | |||
1193 | OS << "("; | |||
1194 | it->dump(); | |||
1195 | OS << ", Index:" << it->getIndex() << ", "; | |||
1196 | OS << ")"; | |||
1197 | } | |||
1198 | OS << "]>\n"; | |||
1199 | } | |||
1200 | #endif |