AdvancedLayerSink.cpp
//===- AdvancedLayerSink.cpp - Sink ops into layer blocks -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass sinks operations into layer blocks.
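//
// For example (an illustrative sketch, not from the original file), a node
// used only inside a layer block,
//
//   %n = firrtl.node %x : !firrtl.uint<1>
//   firrtl.layerblock @A {
//     // ... uses of %n ...
//   }
//
// can be sunk into the layer block, so that it is only elaborated when
// layer @A is enabled.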
//
//===----------------------------------------------------------------------===//

#include "circt/Dialect/FIRRTL/FIRRTLInstanceGraph.h"
#include "circt/Dialect/FIRRTL/FIRRTLOps.h"
#include "circt/Dialect/FIRRTL/Passes.h"
#include "circt/Support/Debug.h"
#include "mlir/IR/Dominance.h"
#include "mlir/IR/Iterators.h"
#include "mlir/IR/Threading.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/ControlFlowSinkUtils.h"

#define DEBUG_TYPE "firrtl-layer-sink"

namespace circt {
namespace firrtl {
#define GEN_PASS_DEF_ADVANCEDLAYERSINK
#include "circt/Dialect/FIRRTL/Passes.h.inc"
} // namespace firrtl
} // namespace circt

using namespace circt;
using namespace firrtl;
using namespace mlir;

//===----------------------------------------------------------------------===//
// Helpers
//===----------------------------------------------------------------------===//

// NOLINTBEGIN(misc-no-recursion)
namespace {
/// Walk the ops under `block` bottom-up, in back-to-front order. The block
/// walk API in upstream MLIR, although it takes an Iterator parameter, always
/// walks the top block front-to-back. This walk function actually walks the
/// ops under `block` back-to-front.
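/// For example (illustrative, not from the original comments): for a block
/// holding ops [A, B, C], where B contains a nested block holding [X, Y],
/// the callback visits C, Y, X, B, A, in that order.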
template <typename T>
void walkBwd(Block *block, T &&f) {
  for (auto &op :
       llvm::make_early_inc_range(llvm::reverse(block->getOperations()))) {
    for (auto &region : op.getRegions())
      for (auto &block : region.getBlocks())
        walkBwd(&block, f);
    f(&op);
  }
}
} // namespace
// NOLINTEND(misc-no-recursion)

/// True if `block` is `other` or an ancestor of `other`.
static bool isAncestor(Block *block, Block *other) {
  if (block == other)
    return true;
  return block->getParent()->isProperAncestor(other->getParent());
}

//===----------------------------------------------------------------------===//
// Effectfulness Analysis
//===----------------------------------------------------------------------===//

namespace {
/// A table that can determine whether an operation is effectful.
class EffectInfo {
public:
  EffectInfo(CircuitOp circuit, InstanceGraph &instanceGraph) {
    DenseSet<InstanceGraphNode *> visited;
    for (auto *root : instanceGraph) {
      for (auto *node : llvm::post_order_ext(root, visited)) {
        auto *op = node->getModule().getOperation();
        update(op);
      }
    }
  }

  /// True if the given operation is NOT movable due to some effect.
  bool effectful(Operation *op) const {
    if (!AnnotationSet(op).canBeDeleted() || hasDontTouch(op))
      return true;
    if (auto name = dyn_cast<FNamableOp>(op))
      if (!name.hasDroppableName())
        return true;
    if (op->getNumRegions() != 0)
      return true;
    if (auto instance = dyn_cast<InstanceOp>(op))
      return effectfulModules.contains(instance.getModuleNameAttr().getAttr());
    if (isa<FConnectLike, WireOp, RegResetOp, RegOp, MemOp, NodeOp>(op))
      return false;
    return !(mlir::isMemoryEffectFree(op) ||
             mlir::hasSingleEffect<mlir::MemoryEffects::Allocate>(op) ||
             mlir::hasSingleEffect<mlir::MemoryEffects::Read>(op));
  }

private:
  /// Record whether the module contains any effectful ops.
  void update(FModuleOp moduleOp) {
    moduleOp.getBodyBlock()->walk([&](Operation *op) {
      if (effectful(op)) {
        markEffectful(moduleOp);
        return WalkResult::interrupt();
      }
      return WalkResult::advance();
    });
  }

  void update(FModuleLike moduleOp) {
    if (!AnnotationSet(moduleOp).canBeDeleted())
      return markEffectful(moduleOp);
    auto *op = moduleOp.getOperation();
    // Regular modules may be pure.
    if (auto m = dyn_cast<FModuleOp>(op))
      return update(m);
    // Memory modules are pure.
    if (auto m = dyn_cast<FMemModuleOp>(op))
      return;
    // All other kinds of modules (intmodules, extmodules, classes) are
    // effectful.
    return markEffectful(moduleOp);
  }

  void update(Operation *op) {
    if (auto moduleOp = dyn_cast<FModuleLike>(op))
      update(moduleOp);
  }

  /// Record that the given module contains an effectful operation.
  void markEffectful(FModuleLike moduleOp) {
    effectfulModules.insert(moduleOp.getModuleNameAttr());
  }

  DenseSet<StringAttr> effectfulModules;
};
} // namespace

//===----------------------------------------------------------------------===//
// Demands Analysis
//===----------------------------------------------------------------------===//

namespace {
/// The LCA of the blocks in which a value is used/demanded. Lattice value.
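/// For example (illustrative, not from the original comments): a demand
/// inside a layer block merged with a demand in the enclosing module body
/// yields the module body, the lowest common ancestor of the two blocks.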
struct Demand {
  constexpr Demand() : Demand(nullptr) {}
  constexpr Demand(Block *block) : block(block) {}

  constexpr Demand merge(Demand other) const {
    if (block == other.block)
      return Demand(block);
    if (other.block == nullptr)
      return Demand(block);
    if (block == nullptr)
      return Demand(other.block);

    auto *b = block;
    while (b && !isAncestor(b, other.block))
      b = b->getParentOp()->getBlock();

    return Demand(b);
  }

  bool mergeIn(Demand other) {
    auto prev = *this;
    auto next = merge(other);
    *this = next;
    return prev != next;
  }

  constexpr bool operator==(Demand rhs) const { return block == rhs.block; }
  constexpr bool operator!=(Demand rhs) const { return block != rhs.block; }
  constexpr operator bool() const { return block; }

  Block *block;
};
} // namespace

/// True if this operation is a good site to sink operations.
static bool isValidDest(Operation *op) {
  return op && (isa<LayerBlockOp>(op) || isa<FModuleOp>(op));
}

/// True if we are prevented from sinking operations into the regions of the op.
static bool isBarrier(Operation *op) { return !isValidDest(op); }

/// Adjust the demand based on the location of the op being demanded. Ideally,
/// we can sink an op directly to its site of use. However, there are two
/// issues.
///
/// 1) Typically, an op will dominate every demand, but for hardware
/// declarations such as wires, the declaration will demand any connections
/// driving it. In this case, the relationship is reversed: the demander
/// dominates the demandee. This can cause us to pull connect-like ops up and
/// out of their enclosing block (ref-defines can be buried under
/// layerblocks). To avoid this, we set an upper bound on the demand: the
/// enclosing block of the demandee.
///
/// 2) Not all regions are valid sink targets. If there is a sink barrier
/// between the operation and its demand, we adjust the demand upwards so that
/// there is no sink barrier between the demandee and the demand site.
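/// For example (an illustrative case, not from the original comments): if an
/// op's only use sits inside a when-block nested under a layer block, the
/// when-block is a sink barrier, so the demand is raised to the layer
/// block's body instead.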
static Demand clamp(Operation *op, Demand demand) {
  if (!demand)
    return nullptr;

  auto *upper = op->getBlock();
  assert(upper && "this should not be called on a top-level operation.");

  if (!isAncestor(upper, demand.block))
    demand = upper;

  for (auto *i = demand.block; i != upper; i = i->getParentOp()->getBlock())
    if (isBarrier(i->getParentOp()))
      demand = i->getParentOp()->getBlock();

  return demand;
}

namespace {
struct DemandInfo {
  using WorkStack = std::vector<Operation *>;

  DemandInfo(const EffectInfo &, FModuleOp);

  Demand getDemandFor(Operation *op) const { return table.lookup(op); }

  /// Starting at effectful ops and output ports, propagate the demand of
  /// values through the design, running until fixpoint. At the end, we have an
  /// accurate picture of where every operation can be sunk, while preserving
  /// effects in the program.
  void run(const EffectInfo &, FModuleOp, WorkStack &);

  /// Update the demand for the given op. If the demand changes, place the op
  /// onto the worklist.
  void update(WorkStack &work, Operation *op, Demand demand) {
    if (table[op].mergeIn(clamp(op, demand)))
      work.push_back(op);
  }

  void update(WorkStack &work, Value value, Demand demand) {
    if (auto result = dyn_cast<OpResult>(value))
      update(work, result.getOwner(), demand);
  }

  void updateConnects(WorkStack &, Value, Demand);
  void updateConnects(WorkStack &, Operation *, Demand);

  llvm::DenseMap<Operation *, Demand> table;
};
} // namespace

DemandInfo::DemandInfo(const EffectInfo &effectInfo, FModuleOp moduleOp) {
  WorkStack work;
  Block *body = moduleOp.getBodyBlock();
  ArrayRef<bool> dirs = moduleOp.getPortDirections();
  for (unsigned i = 0, e = moduleOp.getNumPorts(); i < e; ++i)
    if (direction::get(dirs[i]) == Direction::Out)
      updateConnects(work, body->getArgument(i), moduleOp.getBodyBlock());
  moduleOp.getBodyBlock()->walk([&](Operation *op) {
    if (effectInfo.effectful(op))
      update(work, op, op->getBlock());
  });
  run(effectInfo, moduleOp, work);
}

void DemandInfo::run(const EffectInfo &effectInfo, FModuleOp, WorkStack &work) {
  while (!work.empty()) {
    auto *op = work.back();
    work.pop_back();
    auto demand = getDemandFor(op);
    for (auto operand : op->getOperands())
      update(work, operand, demand);
    updateConnects(work, op, demand);
  }
}

// The value represents a hardware declaration, such as a wire or port. Search
// backwards through uses, looking through aliasing operations such as
// subfields, to find connects that drive the given value. All driving
// connects of a value are demanded by the same region as the value. If the
// demand of the connect op is updated, then the demand will propagate
// forwards to its operands through the normal forward-propagation of demand.
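// For example (illustrative, not from the original comments): for a wire %w
// driven through an alias, as in `%f = subfield %w[a]; connect %f, %v`, the
// connect is demanded wherever %w is demanded.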
void DemandInfo::updateConnects(WorkStack &work, Value value, Demand demand) {
  struct StackElement {
    Value value;
    Value::user_iterator it;
  };

  SmallVector<StackElement> stack;
  stack.push_back({value, value.user_begin()});
  while (!stack.empty()) {
    auto &top = stack.back();
    auto end = top.value.user_end();
    while (true) {
      if (top.it == end) {
        stack.pop_back();
        break;
      }
      auto *user = *(top.it++);
      if (auto connect = dyn_cast<FConnectLike>(user)) {
        if (connect.getDest() == top.value) {
          update(work, connect, demand);
        }
        continue;
      }
      if (isa<SubfieldOp, SubindexOp, SubaccessOp, ObjectSubfieldOp>(user)) {
        for (auto result : user->getResults())
          stack.push_back({result, result.user_begin()});
        break;
      }
    }
  }
}

void DemandInfo::updateConnects(WorkStack &work, Operation *op, Demand demand) {
  if (isa<WireOp, RegResetOp, RegOp, MemOp, ObjectOp>(op)) {
    for (auto result : op->getResults())
      updateConnects(work, result, demand);
  } else if (auto inst = dyn_cast<InstanceOp>(op)) {
    auto dirs = inst.getPortDirections();
    for (unsigned i = 0, e = inst->getNumResults(); i < e; ++i) {
      if (direction::get(dirs[i]) == Direction::In)
        updateConnects(work, inst.getResult(i), demand);
    }
  }
}

//===----------------------------------------------------------------------===//
// ModuleLayerSink
//===----------------------------------------------------------------------===//

namespace {
class ModuleLayerSink {
public:
  static bool run(FModuleOp moduleOp, const EffectInfo &effectInfo) {
    return ModuleLayerSink(moduleOp, effectInfo)();
  }

private:
  ModuleLayerSink(FModuleOp moduleOp, const EffectInfo &effectInfo)
      : moduleOp(moduleOp), effectInfo(effectInfo) {}

  bool operator()();
  void moveLayersToBack(Operation *op);
  void moveLayersToBack(Block *block);

  FModuleOp moduleOp;
  const EffectInfo &effectInfo;
  bool changed = false;
};
} // namespace

// NOLINTNEXTLINE(misc-no-recursion)
void ModuleLayerSink::moveLayersToBack(Operation *op) {
  for (auto &r : op->getRegions())
    for (auto &b : r.getBlocks())
      moveLayersToBack(&b);
}

// NOLINTNEXTLINE(misc-no-recursion)
void ModuleLayerSink::moveLayersToBack(Block *block) {
  auto i = block->rbegin();
  auto e = block->rend();

  // Leave layerblocks that are already "at the back" where they are.
  while (i != e) {
    auto *op = &*i;
    moveLayersToBack(op);
    if (!isa<LayerBlockOp>(op))
      break;
    ++i;
  }

  if (i == e)
    return;

  // We have found a non-layerblock op at position `i`. Any layerblock in
  // front of `i` will be moved to after `i`.
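  // For example (illustrative): a block ordered [%a, layerblock @A, %b,
  // layerblock @B] becomes [%a, %b, layerblock @A, layerblock @B].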
  auto *ip = &*i;
  while (i != e) {
    auto *op = &*i;
    moveLayersToBack(op);
    if (isa<LayerBlockOp>(op)) {
      // Advance the iterator before moving `op`, so the move does not
      // invalidate our position in the reverse walk.
      ++i;
      op->moveAfter(ip);
      changed = true;
    } else {
      ++i;
    }
  }
}

bool ModuleLayerSink::operator()() {
  moveLayersToBack(moduleOp.getBodyBlock());
  DemandInfo demandInfo(effectInfo, moduleOp);
  walkBwd(moduleOp.getBodyBlock(), [&](Operation *op) {
    auto demand = demandInfo.getDemandFor(op);
    if (!demand) {
      // No demand at all: the op is dead and can be deleted.
      op->erase();
      changed = true;
      return;
    }

    if (demand == op->getBlock())
      return;

    op->moveBefore(demand.block, demand.block->begin());
    changed = true;
  });

  return changed;
}

//===----------------------------------------------------------------------===//
// LayerSinkPass
//===----------------------------------------------------------------------===//

namespace {
/// A control-flow sink pass.
struct AdvancedLayerSinkPass final
    : public circt::firrtl::impl::AdvancedLayerSinkBase<AdvancedLayerSinkPass> {
  void runOnOperation() override;
};
} // namespace

void AdvancedLayerSinkPass::runOnOperation() {
  auto circuit = getOperation();
  LLVM_DEBUG(debugPassHeader(this)
                 << "\n"
                 << "Circuit: '" << circuit.getName() << "'\n";);
  auto &instanceGraph = getAnalysis<InstanceGraph>();
  EffectInfo effectInfo(circuit, instanceGraph);

  std::atomic<bool> changed(false);
  parallelForEach(&getContext(), circuit.getOps<FModuleOp>(),
                  [&](FModuleOp moduleOp) {
                    if (ModuleLayerSink::run(moduleOp, effectInfo))
                      changed = true;
                  });

  if (!changed)
    markAllAnalysesPreserved();
  else
    markAnalysesPreserved<InstanceGraph>();
}

//===----------------------------------------------------------------------===//
// Pass Constructor
//===----------------------------------------------------------------------===//

std::unique_ptr<mlir::Pass> circt::firrtl::createAdvancedLayerSinkPass() {
  return std::make_unique<AdvancedLayerSinkPass>();
}
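
// A minimal usage sketch (illustrative, not part of the original file):
// scheduling this pass on the FIRRTL circuit nested under the top-level
// builtin module. The driver setup here is an assumption, not something this
// file prescribes.
//
//   mlir::PassManager pm(&context);
//   pm.nest<firrtl::CircuitOp>().addPass(
//       firrtl::createAdvancedLayerSinkPass());
//   if (failed(pm.run(module)))
//     // ... report the failure ...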