CIRCT  19.0.0git
LegalizeStateUpdate.cpp
Go to the documentation of this file.
1 //===- LegalizeStateUpdate.cpp --------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
11 #include "mlir/Dialect/SCF/IR/SCF.h"
12 #include "mlir/IR/Dominance.h"
13 #include "mlir/IR/ImplicitLocOpBuilder.h"
14 #include "llvm/ADT/PointerIntPair.h"
15 #include "llvm/ADT/TypeSwitch.h"
16 #include "llvm/Support/Debug.h"
17 
18 #define DEBUG_TYPE "arc-legalize-state-update"
19 
20 namespace circt {
21 namespace arc {
22 #define GEN_PASS_DEF_LEGALIZESTATEUPDATE
23 #include "circt/Dialect/Arc/ArcPasses.h.inc"
24 } // namespace arc
25 } // namespace circt
26 
27 using namespace mlir;
28 using namespace circt;
29 using namespace arc;
30 
31 /// Check if an operation partakes in state accesses.
32 static bool isOpInteresting(Operation *op) {
33  if (isa<StateReadOp, StateWriteOp, CallOpInterface, CallableOpInterface>(op))
34  return true;
35  if (op->getNumRegions() > 0)
36  return true;
37  return false;
38 }
39 
40 //===----------------------------------------------------------------------===//
41 // Access Analysis
42 //===----------------------------------------------------------------------===//
43 
namespace {

/// Whether an access reads or writes a state value.
enum class AccessType { Read = 0, Write = 1 };

/// A read or write access to a state value.
using Access = llvm::PointerIntPair<Value, 1, AccessType>;

struct BlockAccesses;
struct OpAccesses;

/// A block's access analysis information and graph edges.
struct BlockAccesses {
  BlockAccesses(Block *block) : block(block) {}

  /// The block.
  Block *const block;
  /// The parent op lattice node.
  OpAccesses *parent = nullptr;
  /// The accesses from ops within this block to the block arguments.
  SmallPtrSet<Access, 1> argAccesses;
  /// The accesses from ops within this block to values defined outside the
  /// block.
  SmallPtrSet<Access, 1> aboveAccesses;
};

/// An operation's access analysis information and graph edges.
struct OpAccesses {
  OpAccesses(Operation *op) : op(op) {}

  /// The operation.
  Operation *const op;
  /// The parent block lattice node.
  BlockAccesses *parent = nullptr;
  /// If this is a callable op, `callers` is the set of ops calling it.
  SmallPtrSet<OpAccesses *, 1> callers;
  /// The accesses performed by this op.
  SmallPtrSet<Access, 1> accesses;
};

/// An analysis that determines states read and written by operations and
/// blocks. Looks through calls and handles nested operations properly. Does
/// not follow state values returned from functions and modified by operations.
struct AccessAnalysis {
  LogicalResult analyze(Operation *op);
  OpAccesses *lookup(Operation *op);
  BlockAccesses *lookup(Block *block);

  /// A global order assigned to state values. These allow us to not care about
  /// ordering during the access analysis and only establish a deterministic
  /// order once we insert additional operations later on.
  DenseMap<Value, unsigned> stateOrder;

  /// A symbol table cache.
  SymbolTableCollection symbolTable;

private:
  // Bump allocators owning the lattice nodes for the analysis' lifetime.
  llvm::SpecificBumpPtrAllocator<OpAccesses> opAlloc;
  llvm::SpecificBumpPtrAllocator<BlockAccesses> blockAlloc;

  // Lattice node lookup tables; nodes are created lazily via `get`.
  DenseMap<Operation *, OpAccesses *> opAccesses;
  DenseMap<Block *, BlockAccesses *> blockAccesses;

  // Callable ops whose access sets changed and need re-propagation to callers.
  SetVector<OpAccesses *> opWorklist;
  // Set when an access to a state value we cannot reason about is found.
  bool anyInvalidStateAccesses = false;

  // Get the node for an operation, creating one if necessary.
  OpAccesses &get(Operation *op) {
    auto &slot = opAccesses[op];
    if (!slot)
      slot = new (opAlloc.Allocate()) OpAccesses(op);
    return *slot;
  }

  // Get the node for a block, creating one if necessary.
  BlockAccesses &get(Block *block) {
    auto &slot = blockAccesses[block];
    if (!slot)
      slot = new (blockAlloc.Allocate()) BlockAccesses(block);
    return *slot;
  }

  // NOLINTBEGIN(misc-no-recursion)
  void addOpAccess(OpAccesses &op, Access access);
  void addBlockAccess(BlockAccesses &block, Access access);
  // NOLINTEND(misc-no-recursion)
};
} // namespace
131 
/// Build the lattice of op/block access nodes rooted at `op`, seed it with
/// the direct state reads/writes, and propagate accesses through call edges
/// to a fixed point. Fails if any unsupported state access was encountered.
LogicalResult AccessAnalysis::analyze(Operation *op) {
  LLVM_DEBUG(llvm::dbgs() << "Analyzing accesses in " << op->getName() << "\n");

  // Create the lattice nodes for all blocks and operations.
  llvm::SmallSetVector<OpAccesses *, 16> initWorklist;
  initWorklist.insert(&get(op));
  while (!initWorklist.empty()) {
    OpAccesses &opNode = *initWorklist.pop_back_val();

    // First create lattice nodes for all nested blocks and operations.
    for (auto &region : opNode.op->getRegions()) {
      for (auto &block : region) {
        BlockAccesses &blockNode = get(&block);
        blockNode.parent = &opNode;
        for (auto &subOp : block) {
          if (!isOpInteresting(&subOp))
            continue;
          OpAccesses &subOpNode = get(&subOp);
          // Isolated-from-above ops get no parent edge so accesses do not
          // leak out of them.
          if (!subOp.hasTrait<OpTrait::IsIsolatedFromAbove>()) {
            subOpNode.parent = &blockNode;
          }
          initWorklist.insert(&subOpNode);
        }
      }
    }

    // Track the relationship between callers and callees.
    if (auto callOp = dyn_cast<CallOpInterface>(opNode.op))
      if (auto *calleeOp = callOp.resolveCallable(&symbolTable))
        get(calleeOp).callers.insert(&opNode);

    // Create the seed accesses.
    if (auto readOp = dyn_cast<StateReadOp>(opNode.op))
      addOpAccess(opNode, Access(readOp.getState(), AccessType::Read));
    else if (auto writeOp = dyn_cast<StateWriteOp>(opNode.op))
      addOpAccess(opNode, Access(writeOp.getState(), AccessType::Write));
  }
  LLVM_DEBUG(llvm::dbgs() << "- Prepared " << blockAccesses.size()
                          << " block and " << opAccesses.size()
                          << " op lattice nodes\n");
  LLVM_DEBUG(llvm::dbgs() << "- Worklist has " << opWorklist.size()
                          << " initial ops\n");

  // Propagate accesses through calls: whenever a callable's entry-block
  // argument accesses change, mirror them onto the corresponding call
  // operands at every call site.
  while (!opWorklist.empty()) {
    if (anyInvalidStateAccesses)
      return failure();
    auto &opNode = *opWorklist.pop_back_val();
    if (opNode.callers.empty())
      continue;
    auto calleeOp = dyn_cast<CallableOpInterface>(opNode.op);
    if (!calleeOp)
      return opNode.op->emitOpError(
          "does not implement CallableOpInterface but has callers");
    LLVM_DEBUG(llvm::dbgs() << "- Updating callable " << opNode.op->getName()
                            << " " << opNode.op->getAttr("sym_name") << "\n");

    auto &calleeRegion = *calleeOp.getCallableRegion();
    auto *blockNode = lookup(&calleeRegion.front());
    if (!blockNode)
      continue;
    auto calleeArgs = blockNode->block->getArguments();

    for (auto *callOpNode : opNode.callers) {
      LLVM_DEBUG(llvm::dbgs() << "  - Updating " << *callOpNode->op << "\n");
      auto callArgs = cast<CallOpInterface>(callOpNode->op).getArgOperands();
      for (auto [calleeArg, callArg] : llvm::zip(calleeArgs, callArgs)) {
        if (blockNode->argAccesses.contains({calleeArg, AccessType::Read}))
          addOpAccess(*callOpNode, {callArg, AccessType::Read});
        if (blockNode->argAccesses.contains({calleeArg, AccessType::Write}))
          addOpAccess(*callOpNode, {callArg, AccessType::Write});
      }
    }
  }

  return failure(anyInvalidStateAccesses);
}
209 
210 OpAccesses *AccessAnalysis::lookup(Operation *op) {
211  return opAccesses.lookup(op);
212 }
213 
214 BlockAccesses *AccessAnalysis::lookup(Block *block) {
215  return blockAccesses.lookup(block);
216 }
217 
// NOLINTBEGIN(misc-no-recursion)
/// Record that `op` performs `access`, diagnose unsupported state values, and
/// propagate the access upward to the op's parent block lattice node.
void AccessAnalysis::addOpAccess(OpAccesses &op, Access access) {
  // We don't support state pointers flowing among ops and blocks. Check that
  // the accessed state is either directly passed down through a block argument
  // (no defining op), or is trivially a local state allocation.
  auto *defOp = access.getPointer().getDefiningOp();
  if (defOp && !isa<AllocStateOp, RootInputOp, RootOutputOp>(defOp)) {
    auto d = op.op->emitOpError("accesses non-trivial state value defined by `")
             << defOp->getName()
             << "`; only block arguments and `arc.alloc_state` results are "
                "supported";
    d.attachNote(defOp->getLoc()) << "state defined here";
    // Flag the failure but keep analyzing so all diagnostics are emitted.
    anyInvalidStateAccesses = true;
  }

  // HACK: Do not propagate accesses outside of `arc.passthrough` to prevent
  // reads from being legalized. Ideally we'd be able to more precisely specify
  // on read ops whether they should read the initial or the final value.
  if (isa<PassThroughOp>(op.op))
    return;

  // If this is a new access for this op, assign the state a deterministic
  // order number on first sight and propagate to the parent block, if any.
  if (op.accesses.insert(access).second && op.parent) {
    stateOrder.insert({access.getPointer(), stateOrder.size()});
    addBlockAccess(*op.parent, access);
  }
}
246 
/// Record that some op inside `block` performs `access`, classifying it as
/// either an access to a value from outside the block (propagated further up)
/// or an access to one of the block's own arguments (propagated to callers).
void AccessAnalysis::addBlockAccess(BlockAccesses &block, Access access) {
  Value value = access.getPointer();

  // If the accessed value is defined outside the block, add it to the set of
  // outside accesses.
  if (value.getParentBlock() != block.block) {
    if (block.aboveAccesses.insert(access).second)
      addOpAccess(*block.parent, access);
    return;
  }

  // If the accessed value is defined within the block, and it is a block
  // argument, add it to the list of block argument accesses.
  if (auto blockArg = dyn_cast<BlockArgument>(value)) {
    assert(blockArg.getOwner() == block.block);
    if (!block.argAccesses.insert(access).second)
      return;

    // Adding block argument accesses affects calls to the surrounding ops. Add
    // the op to the worklist such that the access can propagate to callers.
    opWorklist.insert(block.parent);
  }
}
// NOLINTEND(misc-no-recursion)
271 
272 //===----------------------------------------------------------------------===//
273 // Legalization
274 //===----------------------------------------------------------------------===//
275 
namespace {
/// Rewrites blocks such that no state is written before a later read of the
/// same state, inserting temporary states where necessary (see `visitBlock`).
struct Legalizer {
  Legalizer(AccessAnalysis &analysis) : analysis(analysis) {}
  LogicalResult run(MutableArrayRef<Region> regions);
  LogicalResult visitBlock(Block *block);

  /// The access analysis previously computed over the ops being legalized.
  AccessAnalysis &analysis;

  // Counters reported back to the pass statistics after legalization.
  unsigned numLegalizedWrites = 0;
  unsigned numUpdatedReads = 0;

  /// A mapping from pre-existing states to temporary states for read
  /// operations, created during legalization to remove read-after-write
  /// hazards.
  DenseMap<Value, Value> legalizedStates;
};
} // namespace
293 
294 LogicalResult Legalizer::run(MutableArrayRef<Region> regions) {
295  for (auto &region : regions)
296  for (auto &block : region)
297  if (failed(visitBlock(&block)))
298  return failure();
299  assert(legalizedStates.empty() && "should be balanced within block");
300  return success();
301 }
302 
/// Legalize a single block: detect states that are written before a later
/// read (read-after-write hazard within the block's schedule), snapshot those
/// states into temporaries just before the offending write, and redirect the
/// block's reads to the temporaries. Recurses into nested blocks.
LogicalResult Legalizer::visitBlock(Block *block) {
  // In a first reverse pass over the block, find the first write that occurs
  // before the last read of a state, if any.
  SmallPtrSet<Value, 4> readStates;
  DenseMap<Value, Operation *> illegallyWrittenStates;
  for (Operation &op : llvm::reverse(*block)) {
    const auto *accesses = analysis.lookup(&op);
    if (!accesses)
      continue;

    // Determine the states written by this op for which we have already seen a
    // read earlier. These writes need to be legalized.
    SmallVector<Value, 1> affectedStates;
    for (auto access : accesses->accesses)
      if (access.getInt() == AccessType::Write)
        if (readStates.contains(access.getPointer()))
          illegallyWrittenStates[access.getPointer()] = &op;

    // Determine the states read by this op. This comes after handling of the
    // writes, such that a block that contains both reads and writes to a state
    // doesn't mark itself as illegal. Instead, we will descend into that block
    // further down and do a more fine-grained legalization.
    for (auto access : accesses->accesses)
      if (access.getInt() == AccessType::Read)
        readStates.insert(access.getPointer());
  }

  // Create a mapping from operations that create a read-after-write hazard to
  // the states that they modify. Don't consider states that have already been
  // legalized. This is important since we may have already created a temporary
  // in a parent block which we can just reuse.
  DenseMap<Operation *, SmallVector<Value, 1>> illegalWrites;
  for (auto [state, op] : illegallyWrittenStates)
    if (!legalizedStates.count(state))
      illegalWrites[op].push_back(state);

  // In a second forward pass over the block, insert the necessary temporary
  // state to legalize the writes and recur into subblocks while providing the
  // necessary rewrites.
  SmallVector<Value> locallyLegalizedStates;

  // Snapshot each hazardous state into a freshly allocated temporary right
  // before `op`, the first write that precedes the last read of that state.
  auto handleIllegalWrites =
      [&](Operation *op, SmallVector<Value, 1> &states) -> LogicalResult {
    LLVM_DEBUG(llvm::dbgs() << "Visiting illegal " << op->getName() << "\n");

    // Sort the states we need to legalize by a deterministic order established
    // during the access analysis. Without this the exact order in which states
    // were moved into a temporary would be non-deterministic.
    llvm::sort(states, [&](Value a, Value b) {
      return analysis.stateOrder.lookup(a) < analysis.stateOrder.lookup(b);
    });

    // Legalize each state individually.
    for (auto state : states) {
      LLVM_DEBUG(llvm::dbgs() << "- Legalizing " << state << "\n");

      // HACK: This is ugly, but we need a storage reference to allocate a state
      // into. Ideally we'd materialize this later on, but the current impl of
      // the alloc op requires a storage immediately. So try to find one.
      auto storage = TypeSwitch<Operation *, Value>(state.getDefiningOp())
                         .Case<AllocStateOp, RootInputOp, RootOutputOp>(
                             [&](auto allocOp) { return allocOp.getStorage(); })
                         .Default([](auto) { return Value{}; });
      if (!storage) {
        mlir::emitError(
            state.getLoc(),
            "cannot find storage pointer to allocate temporary into");
        return failure();
      }

      // Allocate a temporary state, read the current value of the state we are
      // legalizing, and write it to the temporary.
      ++numLegalizedWrites;
      ImplicitLocOpBuilder builder(state.getLoc(), op);
      auto tmpState =
          builder.create<AllocStateOp>(state.getType(), storage, nullptr);
      auto stateValue = builder.create<StateReadOp>(state);
      builder.create<StateWriteOp>(tmpState, stateValue, Value{});
      locallyLegalizedStates.push_back(state);
      legalizedStates.insert({state, tmpState});
    }
    return success();
  };

  for (Operation &op : *block) {
    if (isOpInteresting(&op)) {
      if (auto it = illegalWrites.find(&op); it != illegalWrites.end())
        if (failed(handleIllegalWrites(&op, it->second)))
          return failure();
    }
    // BUG: This is insufficient. Actually only reads should have their state
    // updated, since we want writes to still affect the original state. This
    // works for `state_read`, but in the case of a function that both reads and
    // writes a state we only have a single operand to change but we would need
    // one for reads and one for writes instead.
    // HACKY FIX: Assume that there is ever only a single write to a state. In
    // that case it is safe to assume that when an op is marked as writing a
    // state it wants the original state, not the temporary one for reads.
    const auto *accesses = analysis.lookup(&op);
    for (auto &operand : op.getOpOperands()) {
      // Warn about the read+write case the hacky fix above cannot handle.
      if (accesses &&
          accesses->accesses.contains({operand.get(), AccessType::Read}) &&
          accesses->accesses.contains({operand.get(), AccessType::Write})) {
        auto d = op.emitWarning("operation reads and writes state; "
                                "legalization may be insufficient");
        d.attachNote()
            << "state update legalization does not properly handle operations "
               "that both read and write states at the same time; runtime data "
               "races between the read and write behavior are possible";
        d.attachNote(operand.get().getLoc()) << "state defined here:";
      }
      // Redirect read-only operands to the temporary snapshot, if one exists.
      if (!accesses ||
          !accesses->accesses.contains({operand.get(), AccessType::Write})) {
        if (auto tmpState = legalizedStates.lookup(operand.get())) {
          operand.set(tmpState);
          ++numUpdatedReads;
        }
      }
    }
    // Recurse into nested blocks with the current rewrites in scope.
    for (auto &region : op.getRegions())
      for (auto &block : region)
        if (failed(visitBlock(&block)))
          return failure();
  }

  // Since we're leaving this block's scope, remove all the locally-legalized
  // states which are no longer accessible outside.
  for (auto state : locallyLegalizedStates)
    legalizedStates.erase(state);
  return success();
}
434 
436  Operation *write, Operation **writeAncestor, Operation *read,
437  Operation **readAncestor, DominanceInfo *domInfo) {
438  Block *commonDominator =
439  domInfo->findNearestCommonDominator(write->getBlock(), read->getBlock());
440  if (!commonDominator)
441  return write->emitOpError(
442  "cannot find a common dominator block with all read operations");
443 
444  // Path from writeOp to commmon dominator must only contain IfOps with no
445  // return values
446  Operation *writeParent = write;
447  while (writeParent->getBlock() != commonDominator) {
448  if (!isa<scf::IfOp, ClockTreeOp>(writeParent->getParentOp()))
449  return write->emitOpError("memory write operations in arbitrarily nested "
450  "regions not supported");
451  writeParent = writeParent->getParentOp();
452  }
453  Operation *readParent = read;
454  while (readParent->getBlock() != commonDominator)
455  readParent = readParent->getParentOp();
456 
457  *writeAncestor = writeParent;
458  *readAncestor = readParent;
459  return success();
460 }
461 
462 static LogicalResult
463 moveMemoryWritesAfterLastRead(Region &region, const DenseSet<Value> &memories,
464  DominanceInfo *domInfo) {
465  // Collect memory values and their reads
466  DenseMap<Value, SetVector<Operation *>> readOps;
467  auto result = region.walk([&](Operation *op) {
468  if (isa<MemoryWriteOp>(op))
469  return WalkResult::advance();
470  SmallVector<Value> memoriesReadFrom;
471  if (auto readOp = dyn_cast<MemoryReadOp>(op)) {
472  memoriesReadFrom.push_back(readOp.getMemory());
473  } else {
474  for (auto operand : op->getOperands())
475  if (isa<MemoryType>(operand.getType()))
476  memoriesReadFrom.push_back(operand);
477  }
478  for (auto memVal : memoriesReadFrom) {
479  if (!memories.contains(memVal))
480  return op->emitOpError("uses memory value not directly defined by a "
481  "arc.alloc_memory operation"),
482  WalkResult::interrupt();
483  readOps[memVal].insert(op);
484  }
485 
486  return WalkResult::advance();
487  });
488 
489  if (result.wasInterrupted())
490  return failure();
491 
492  // Collect all writes
493  SmallVector<MemoryWriteOp> writes;
494  region.walk([&](MemoryWriteOp writeOp) { writes.push_back(writeOp); });
495 
496  // Move the writes
497  for (auto writeOp : writes) {
498  if (!memories.contains(writeOp.getMemory()))
499  return writeOp->emitOpError("uses memory value not directly defined by a "
500  "arc.alloc_memory operation");
501  for (auto *readOp : readOps[writeOp.getMemory()]) {
502  // (1) If the last read and the write are in the same block, just move the
503  // write after the read.
504  // (2) If the write is directly in the clock tree region and the last read
505  // in some nested region, move the write after the operation with the
506  // nested region. (3) If the write is nested in if-statements (arbitrarily
507  // deep) without return value, move the whole if operation after the last
508  // read or the operation that defines the region if the read is inside a
509  // nested region. (4) Number (3) may move more memory operations with the
510  // write op, thus messing up the order of previously moved memory writes,
511  // we check in a second walk-through if that is the case and just emit an
512  // error for now. We could instead move reads in a parent region, split if
513  // operations such that the memory write has its own, etc. Alternatively,
514  // rewrite this to insert temporaries which is more difficult for memories
515  // than simple states because the memory addresses have to be considered
516  // (we cannot just copy the whole memory each time).
517  Operation *readAncestor, *writeAncestor;
519  writeOp, &writeAncestor, readOp, &readAncestor, domInfo)))
520  return failure();
521  // FIXME: the 'isBeforeInBlock` + 'moveAfter' compination can be
522  // computationally very expensive.
523  if (writeAncestor->isBeforeInBlock(readAncestor))
524  writeAncestor->moveAfter(readAncestor);
525  }
526  }
527 
528  // Double check that all writes happen after all reads to the same memory.
529  for (auto writeOp : writes) {
530  for (auto *readOp : readOps[writeOp.getMemory()]) {
531  Operation *readAncestor, *writeAncestor;
533  writeOp, &writeAncestor, readOp, &readAncestor, domInfo)))
534  return failure();
535 
536  if (writeAncestor->isBeforeInBlock(readAncestor))
537  return writeOp
538  ->emitOpError("could not be moved to be after all reads to "
539  "the same memory")
540  .attachNote(readOp->getLoc())
541  << "could not be moved after this read";
542  }
543  }
544 
545  return success();
546 }
547 
548 //===----------------------------------------------------------------------===//
549 // Pass Infrastructure
550 //===----------------------------------------------------------------------===//
551 
namespace {
/// Pass that legalizes state update order: moves memory writes after reads of
/// the same memory and introduces temporary states to break read-after-write
/// hazards (see `runOnOperation`).
struct LegalizeStateUpdatePass
    : public arc::impl::LegalizeStateUpdateBase<LegalizeStateUpdatePass> {
  LegalizeStateUpdatePass() = default;
  // Copies re-run default construction; per-instance statistics are not
  // carried over between pass instances.
  LegalizeStateUpdatePass(const LegalizeStateUpdatePass &pass)
      : LegalizeStateUpdatePass() {}

  void runOnOperation() override;

  Statistic numLegalizedWrites{
      this, "legalized-writes",
      "Writes that required temporary state for later reads"};
  Statistic numUpdatedReads{this, "updated-reads", "Reads that were updated"};
};
} // namespace
567 
/// Run the pass: first reorder memory writes after reads within each model's
/// clock trees, then run the access analysis and state legalization over the
/// whole module.
void LegalizeStateUpdatePass::runOnOperation() {
  auto module = getOperation();
  auto *domInfo = &getAnalysis<DominanceInfo>();

  // Handle memories separately: writes are moved after reads rather than
  // snapshotted into temporaries.
  for (auto model : module.getOps<ModelOp>()) {
    DenseSet<Value> memories;
    for (auto memOp : model.getOps<AllocMemoryOp>())
      memories.insert(memOp.getResult());
    for (auto ct : model.getOps<ClockTreeOp>())
      if (failed(
              moveMemoryWritesAfterLastRead(ct.getBody(), memories, domInfo)))
        return signalPassFailure();
  }

  // Determine which states each op and block reads and writes.
  AccessAnalysis analysis;
  if (failed(analysis.analyze(module)))
    return signalPassFailure();

  // Break read-after-write hazards on states and accumulate statistics.
  Legalizer legalizer(analysis);
  if (failed(legalizer.run(module->getRegions())))
    return signalPassFailure();
  numLegalizedWrites += legalizer.numLegalizedWrites;
  numUpdatedReads += legalizer.numUpdatedReads;
}
592 
593 std::unique_ptr<Pass> arc::createLegalizeStateUpdatePass() {
594  return std::make_unique<LegalizeStateUpdatePass>();
595 }
assert(baseType &&"element must be base type")
static LogicalResult getAncestorOpsInCommonDominatorBlock(Operation *write, Operation **writeAncestor, Operation *read, Operation **readAncestor, DominanceInfo *domInfo)
static LogicalResult moveMemoryWritesAfterLastRead(Region &region, const DenseSet< Value > &memories, DominanceInfo *domInfo)
static bool isOpInteresting(Operation *op)
Check if an operation partakes in state accesses.
Builder builder
std::unique_ptr< mlir::Pass > createLegalizeStateUpdatePass()
Direction get(bool isOutput)
Returns an output direction if isOutput is true, otherwise returns an input direction.
Definition: CalyxOps.cpp:54
The InstanceGraph op interface, see InstanceGraphInterface.td for more details.
Definition: DebugAnalysis.h:21