LayerSink.cpp
//===- LayerSink.cpp - Sink ops into layer blocks -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass sinks operations into layer blocks.
//
//===----------------------------------------------------------------------===//
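
// For example (an illustrative sketch; the FIRRTL syntax is approximate), an
// op whose only users live inside a layer block is moved into that block:
//
//   %n = firrtl.node %x                   firrtl.layerblock @A {
//   firrtl.layerblock @A {          =>      %n = firrtl.node %x
//     %y = firrtl.node %n                   %y = firrtl.node %n
//   }                                     }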

#include "circt/Dialect/FIRRTL/FIRRTLInstanceGraph.h"
#include "circt/Dialect/FIRRTL/FIRRTLOps.h"
#include "circt/Dialect/FIRRTL/FIRRTLUtils.h"
#include "circt/Dialect/FIRRTL/Passes.h"
#include "circt/Support/Debug.h"
#include "mlir/IR/Dominance.h"
#include "mlir/IR/Threading.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/ControlFlowSinkUtils.h"
#include "llvm/ADT/PostOrderIterator.h"

#define DEBUG_TYPE "firrtl-layer-sink"

namespace circt {
namespace firrtl {
#define GEN_PASS_DEF_LAYERSINK
#include "circt/Dialect/FIRRTL/Passes.h.inc"
} // namespace firrtl
} // namespace circt

using namespace circt;
using namespace firrtl;
using namespace mlir;

//===----------------------------------------------------------------------===//
// Helpers
//===----------------------------------------------------------------------===//

// NOLINTBEGIN(misc-no-recursion)
namespace {
/// Walk the ops under `block` in bottom-up, back-to-front order. The block
/// walk API in upstream MLIR, although it takes an Iterator parameter, always
/// walks the top block front-to-back. This helper genuinely visits the ops
/// back-to-front, recursing into each op's regions before visiting the op
/// itself.
template <typename T>
void walkBwd(Block *block, T &&f) {
  for (auto &op :
       llvm::make_early_inc_range(llvm::reverse(block->getOperations()))) {
    for (auto &region : op.getRegions())
      for (auto &block : region.getBlocks())
        walkBwd(&block, f);
    f(&op);
  }
}
} // namespace
// NOLINTEND(misc-no-recursion)
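
// For instance (an illustrative trace), given a module body
//   [%a, layerblock { %b }]
// walkBwd visits %b, then the layerblock, then %a: ops later in a block, and
// the ops nested under them, are visited before earlier ops.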

/// Returns true if `block` is `other` or an ancestor of `other`.
static bool isAncestor(Block *block, Block *other) {
  if (block == other)
    return true;
  return block->getParent()->isProperAncestor(other->getParent());
}

//===----------------------------------------------------------------------===//
// Clone-ability
//===----------------------------------------------------------------------===//

/// True if the operation can be duplicated at each site of use instead of
/// being moved.
static bool cloneable(Operation *op) {
  // Clone any constants.
  if (op->hasTrait<OpTrait::ConstantLike>())
    return true;

  // Clone probe ops, to minimize residue within the design.
  if (isa<RefSendOp, RefResolveOp, RefCastOp, RefSubOp>(op))
    return true;

  return false;
}
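
// E.g., a constant feeding nodes in two different layerblocks is copied into
// each of them, leaving nothing behind in the parent block.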

//===----------------------------------------------------------------------===//
// Effectfulness Analysis
//===----------------------------------------------------------------------===//

namespace {
/// A table that can determine whether an operation is effectful.
class EffectInfo {
public:
  EffectInfo(CircuitOp circuit, InstanceGraph &instanceGraph) {
    // Visit the instance graph in post-order (children before parents), so
    // that a module's effectfulness is known before any module that
    // instantiates it is examined.
    DenseSet<InstanceGraphNode *> visited;
    for (auto *root : instanceGraph) {
      for (auto *node : llvm::post_order_ext(root, visited)) {
        auto *op = node->getModule().getOperation();
        update(op);
      }
    }
  }

  /// True if the given operation is NOT movable due to some effect.
  bool effectful(Operation *op) const {
    if (!AnnotationSet(op).empty() || hasDontTouch(op))
      return true;
    if (auto name = dyn_cast<FNamableOp>(op))
      if (!name.hasDroppableName())
        return true;
    if (op->getNumRegions() != 0)
      return true;
    if (auto instance = dyn_cast<InstanceOp>(op))
      return effectfulModules.contains(instance.getModuleNameAttr().getAttr());
    if (isa<FConnectLike, WireOp, RegResetOp, RegOp, MemOp, NodeOp>(op))
      return false;
    return !(mlir::isMemoryEffectFree(op) ||
             mlir::hasSingleEffect<mlir::MemoryEffects::Allocate>(op) ||
             mlir::hasSingleEffect<mlir::MemoryEffects::Read>(op));
  }

private:
  /// Record whether the module contains any effectful ops.
  void update(FModuleOp moduleOp) {
    moduleOp.getBodyBlock()->walk([&](Operation *op) {
      if (effectful(op)) {
        markEffectful(moduleOp);
        return WalkResult::interrupt();
      }
      return WalkResult::advance();
    });
  }

  /// Record whether the module-like op contains any effectful op.
  void update(FModuleLike moduleOp) {
    // If the module op has any annotations, pretend that it contains some
    // kind of important effect, so that its instances cannot be sunk.
    if (auto annos = getAnnotationsIfPresent(moduleOp))
      if (!annos.empty())
        return markEffectful(moduleOp);

    for (auto annos : moduleOp.getPortAnnotations())
      if (!cast<ArrayAttr>(annos).empty())
        return markEffectful(moduleOp);

    auto *op = moduleOp.getOperation();
    // Regular modules may be pure.
    if (auto m = dyn_cast<FModuleOp>(op))
      return update(m);
    // Memory modules are pure.
    if (auto m = dyn_cast<FMemModuleOp>(op))
      return;
    // All other kinds of modules (intmodules, extmodules, classes) are
    // effectful.
    return markEffectful(moduleOp);
  }

  void update(Operation *op) {
    if (auto moduleOp = dyn_cast<FModuleLike>(op))
      update(moduleOp);
  }

  /// Record that the given module contains an effectful operation.
  void markEffectful(FModuleLike moduleOp) {
    effectfulModules.insert(moduleOp.getModuleNameAttr());
  }

  DenseSet<StringAttr> effectfulModules;
};
} // namespace
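
// In short, an op is pinned in place if it carries annotations or a
// dont-touch, has a non-droppable name or any regions, instantiates a module
// that (transitively) contains such an op, or reports a memory effect other
// than a lone allocate or read.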

//===----------------------------------------------------------------------===//
// Demands Analysis
//===----------------------------------------------------------------------===//

namespace {
/// A lattice value: the LCA of the blocks in which a value is used/demanded.
struct Demand {
  constexpr Demand() : Demand(nullptr) {}
  constexpr Demand(Block *block) : block(block) {}

  constexpr Demand merge(Demand other) const {
    if (block == other.block)
      return Demand(block);
    if (other.block == nullptr)
      return Demand(block);
    if (block == nullptr)
      return Demand(other.block);

    // Walk upwards from this demand's block until we reach a block that is
    // (or is an ancestor of) the other demand's block.
    auto *b = block;
    while (b && !isAncestor(b, other.block))
      b = b->getParentOp()->getBlock();

    return Demand(b);
  }

  bool mergeIn(Demand other) {
    auto prev = *this;
    auto next = merge(other);
    *this = next;
    return prev != next;
  }

  constexpr bool operator==(Demand rhs) const { return block == rhs.block; }
  constexpr bool operator!=(Demand rhs) const { return block != rhs.block; }
  constexpr operator bool() const { return block; }

  Block *block;
};
} // namespace
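
// E.g., if a value is demanded by two sibling layerblocks, the merge walks up
// from one sibling until it reaches an ancestor of the other: the shared
// parent block. The op can then only sink as far as that parent (unless it is
// cloneable).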

/// True if this operation is a good site to sink operations into.
static bool isValidDest(Operation *op) {
  return op && (isa<LayerBlockOp>(op) || isa<FModuleOp>(op));
}

/// True if we are prevented from sinking operations into the regions of the
/// op.
static bool isBarrier(Operation *op) { return !isValidDest(op); }

/// Adjust the demand based on the location of the op being demanded. Ideally,
/// we can sink an op directly to its site of use. However, there are two
/// issues.
///
/// 1) Typically, an op will dominate every demand, but for hardware
/// declarations such as wires, the declaration will demand any connections
/// driving it. In this case, the relationship is reversed: the demander
/// dominates the demandee. This can cause us to pull connect-like ops up and
/// out of their enclosing block (ref-defines can be buried under
/// layerblocks). To avoid this, we set an upper bound on the demand: the
/// enclosing block of the demandee.
///
/// 2) Not all regions are valid sink targets. If there is a sink-barrier
/// between the operation and its demand, we adjust the demand upwards so that
/// there is no sink barrier between the demandee and the demand site.
static Demand clamp(Operation *op, Demand demand) {
  if (!demand)
    return nullptr;

  auto *upper = op->getBlock();
  assert(upper && "this should not be called on a top-level operation.");

  if (!isAncestor(upper, demand.block))
    demand = upper;

  for (auto *i = demand.block; i != upper; i = i->getParentOp()->getBlock())
    if (isBarrier(i->getParentOp()))
      demand = i->getParentOp()->getBlock();

  return demand;
}
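
// E.g., a ref-define buried under a layerblock that drives a wire declared
// outside of it keeps its position: its demand is clamped to its own
// enclosing block rather than being hoisted up to the wire's block.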

namespace {
struct DemandInfo {
  using WorkStack = std::vector<Operation *>;

  DemandInfo(const EffectInfo &, FModuleOp);

  Demand getDemandFor(Operation *op) const { return table.lookup(op); }

  /// Starting at effectful ops and output ports, propagate the demand of
  /// values through the design, running until fixpoint. At the end, we have
  /// an accurate picture of where every operation can be sunk, while
  /// preserving effects in the program.
  void run(const EffectInfo &, FModuleOp, WorkStack &);

  /// Update the demand for the given op. If the demand changes, place the op
  /// onto the worklist.
  void update(WorkStack &work, Operation *op, Demand demand) {
    if (table[op].mergeIn(clamp(op, demand)))
      work.push_back(op);
  }

  void update(WorkStack &work, Value value, Demand demand) {
    if (auto result = dyn_cast<OpResult>(value))
      update(work, result.getOwner(), demand);
  }

  void updateConnects(WorkStack &, Value, Demand);
  void updateConnects(WorkStack &, Operation *, Demand);

  llvm::DenseMap<Operation *, Demand> table;
};
} // namespace

/// Seed the worklist: every output port demands its drivers at the module
/// body, and every effectful op demands itself at its current block.
DemandInfo::DemandInfo(const EffectInfo &effectInfo, FModuleOp moduleOp) {
  WorkStack work;
  Block *body = moduleOp.getBodyBlock();
  ArrayRef<bool> dirs = moduleOp.getPortDirections();
  for (unsigned i = 0, e = moduleOp.getNumPorts(); i < e; ++i)
    if (direction::get(dirs[i]) == Direction::Out)
      updateConnects(work, body->getArgument(i), moduleOp.getBodyBlock());
  moduleOp.getBodyBlock()->walk([&](Operation *op) {
    if (effectInfo.effectful(op))
      update(work, op, op->getBlock());
  });
  run(effectInfo, moduleOp, work);
}

void DemandInfo::run(const EffectInfo &effectInfo, FModuleOp, WorkStack &work) {
  while (!work.empty()) {
    auto *op = work.back();
    work.pop_back();
    auto demand = getDemandFor(op);
    for (auto operand : op->getOperands())
      update(work, operand, demand);
    updateConnects(work, op, demand);
  }
}

// The value represents a hardware declaration, such as a wire or port. Search
// backwards through uses, looking through aliasing operations such as
// subfields, to find connects that drive the given value. All driving
// connects of a value are demanded by the same region as the value. If the
// demand of a connect op is updated, the demand propagates forward to its
// operands through the normal forward-propagation of demand.
void DemandInfo::updateConnects(WorkStack &work, Value value, Demand demand) {
  struct StackElement {
    Value value;
    Value::user_iterator it;
  };

  SmallVector<StackElement> stack;
  stack.push_back({value, value.user_begin()});
  while (!stack.empty()) {
    auto &top = stack.back();
    auto end = top.value.user_end();
    while (true) {
      if (top.it == end) {
        stack.pop_back();
        break;
      }
      auto *user = *(top.it++);
      if (auto connect = dyn_cast<FConnectLike>(user)) {
        if (connect.getDest() == top.value) {
          update(work, connect, demand);
        }
        continue;
      }
      if (isa<SubfieldOp, SubindexOp, SubaccessOp, ObjectSubfieldOp>(user)) {
        for (auto result : user->getResults())
          stack.push_back({result, result.user_begin()});
        break;
      }
    }
  }
}
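
// E.g., for a wire %w driven through a subfield, the search walks
// %w -> subfield -> connect, demands the connect wherever %w is demanded, and
// normal forward propagation then demands the connect's source there as well.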

void DemandInfo::updateConnects(WorkStack &work, Operation *op, Demand demand) {
  if (isa<WireOp, RegResetOp, RegOp, MemOp, ObjectOp>(op)) {
    for (auto result : op->getResults())
      updateConnects(work, result, demand);
  } else if (auto inst = dyn_cast<InstanceOp>(op)) {
    auto dirs = inst.getPortDirections();
    for (unsigned i = 0, e = inst->getNumResults(); i < e; ++i) {
      if (direction::get(dirs[i]) == Direction::In)
        updateConnects(work, inst.getResult(i), demand);
    }
  }
}

//===----------------------------------------------------------------------===//
// ModuleLayerSink
//===----------------------------------------------------------------------===//

namespace {
class ModuleLayerSink {
public:
  static bool run(FModuleOp moduleOp, const EffectInfo &effectInfo) {
    return ModuleLayerSink(moduleOp, effectInfo)();
  }

private:
  ModuleLayerSink(FModuleOp moduleOp, const EffectInfo &effectInfo)
      : moduleOp(moduleOp), effectInfo(effectInfo) {}

  bool operator()();
  void moveLayersToBack(Operation *op);
  void moveLayersToBack(Block *block);

  void erase(Operation *op);
  void sinkOpByCloning(Operation *op);
  void sinkOpByMoving(Operation *op, Block *dest);

  FModuleOp moduleOp;
  const EffectInfo &effectInfo;
  bool changed = false;
};
} // namespace

// NOLINTNEXTLINE(misc-no-recursion)
void ModuleLayerSink::moveLayersToBack(Operation *op) {
  for (auto &r : op->getRegions())
    for (auto &b : r.getBlocks())
      moveLayersToBack(&b);
}

// NOLINTNEXTLINE(misc-no-recursion)
void ModuleLayerSink::moveLayersToBack(Block *block) {
  auto i = block->rbegin();
  auto e = block->rend();

  // Leave layerblocks that are already "at the back" where they are.
  while (i != e) {
    auto *op = &*i;
    moveLayersToBack(op);
    if (!isa<LayerBlockOp>(op))
      break;
    ++i;
  }

  if (i == e)
    return;

  // We have found a non-layerblock op at position `i`. Any layerblock in
  // front of `i` will be moved to just after `i`.
  auto *ip = &*i;
  while (i != e) {
    auto *op = &*i;
    moveLayersToBack(op);
    if (isa<LayerBlockOp>(op)) {
      ++i;
      op->moveAfter(ip);
      changed = true;
    } else {
      ++i;
    }
  }
}
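
// E.g., [layerblock @A, %x, layerblock @B, %y] becomes
// [%x, %y, layerblock @A, layerblock @B]: non-layerblock ops end up in front,
// and the layerblocks keep their relative order.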

void ModuleLayerSink::erase(Operation *op) {
  op->erase();
  changed = true;
}

void ModuleLayerSink::sinkOpByCloning(Operation *op) {
  // Copy the op down to each block where there is a user.
  // Use a cache to ensure we don't create more than one copy per block.
  auto *src = op->getBlock();
  DenseMap<Block *, Operation *> cache;
  for (unsigned i = 0, e = op->getNumResults(); i < e; ++i) {
    for (auto &use : llvm::make_early_inc_range(op->getResult(i).getUses())) {
      auto *dst = use.getOwner()->getBlock();
      if (dst != src) {
        auto &clone = cache[dst];
        if (!clone)
          clone = OpBuilder::atBlockBegin(dst).clone(*op);
        use.set(clone->getResult(i));
        changed = true;
      }
    }
  }

  // If there are no remaining uses of the original op, erase it.
  if (isOpTriviallyDead(op))
    erase(op);
}
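
// Note: only cloneable ops (constants and probe ops) reach this path, each
// destination block receives at most one copy, and the original is erased
// once it is dead.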

void ModuleLayerSink::sinkOpByMoving(Operation *op, Block *dst) {
  if (dst != op->getBlock()) {
    op->moveBefore(dst, dst->begin());
    changed = true;
  }
}

bool ModuleLayerSink::operator()() {
  moveLayersToBack(moduleOp.getBodyBlock());
  DemandInfo demandInfo(effectInfo, moduleOp);
  walkBwd(moduleOp.getBodyBlock(), [&](Operation *op) {
    auto demand = demandInfo.getDemandFor(op);
    if (!demand)
      return erase(op);
    if (cloneable(op))
      return sinkOpByCloning(op);
    sinkOpByMoving(op, demand.block);
  });

  return changed;
}

//===----------------------------------------------------------------------===//
// LayerSinkPass
//===----------------------------------------------------------------------===//

namespace {
/// A control-flow sink pass.
struct LayerSinkPass final
    : public circt::firrtl::impl::LayerSinkBase<LayerSinkPass> {
  void runOnOperation() override;
};
} // namespace

void LayerSinkPass::runOnOperation() {
  auto circuit = getOperation();
  LLVM_DEBUG(debugPassHeader(this)
             << "\n"
             << "Circuit: '" << circuit.getName() << "'\n";);
  auto &instanceGraph = getAnalysis<InstanceGraph>();
  EffectInfo effectInfo(circuit, instanceGraph);

  std::atomic<bool> changed(false);
  parallelForEach(&getContext(), circuit.getOps<FModuleOp>(),
                  [&](FModuleOp moduleOp) {
                    if (ModuleLayerSink::run(moduleOp, effectInfo))
                      changed = true;
                  });

  if (!changed)
    markAllAnalysesPreserved();
  else
    markAnalysesPreserved<InstanceGraph>();
}
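
// A sketch of how this pass might be invoked from the command line, assuming
// it is registered under the same name as DEBUG_TYPE above:
//
//   circt-opt --firrtl-layer-sink design.mlir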