CIRCT 23.0.0git
Loading...
Searching...
No Matches
LayerSink.cpp
Go to the documentation of this file.
1//===- LayerSink.cpp - Sink ops into layer blocks -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass sinks operations into layer blocks.
10//
11//===----------------------------------------------------------------------===//
12
15#include "circt/Support/Debug.h"
16#include "mlir/IR/Dominance.h"
17#include "mlir/IR/Threading.h"
18#include "mlir/Interfaces/SideEffectInterfaces.h"
19#include "mlir/Transforms/ControlFlowSinkUtils.h"
20
21#define DEBUG_TYPE "firrtl-layer-sink"
22
23namespace circt {
24namespace firrtl {
25#define GEN_PASS_DEF_LAYERSINK
26#include "circt/Dialect/FIRRTL/Passes.h.inc"
27} // namespace firrtl
28} // namespace circt
29
30using namespace circt;
31using namespace firrtl;
32using namespace mlir;
33
34//===----------------------------------------------------------------------===//
35// Helpers
36//===----------------------------------------------------------------------===//
37
38// NOLINTBEGIN(misc-no-recursion)
39namespace {
40/// Walk the ops in `block` bottom-up, back-to-front order. The block walk API
41/// in upstream MLIR, although it takes an Iterator parameter, will always walk
42/// the top block front-to-back. This walk function will actually walk the ops
43/// under `block` back-to-front.
44template <typename T>
45void walkBwd(Block *block, T &&f) {
46 for (auto &op :
47 llvm::make_early_inc_range(llvm::reverse(block->getOperations()))) {
48 for (auto &region : op.getRegions())
49 for (auto &block : region.getBlocks())
50 walkBwd(&block, f);
51 f(&op);
52 }
53}
54} // namespace
55// NOLINTEND(misc-no-recursion)
56
57static bool isAncestor(Block *block, Block *other) {
58 if (block == other)
59 return true;
60 return block->getParent()->isProperAncestor(other->getParent());
61}
62
63//===----------------------------------------------------------------------===//
64// Clone-ability.
65//===----------------------------------------------------------------------===//
66
67static bool cloneable(Operation *op) {
68 // Clone any constants.
69 if (op->hasTrait<OpTrait::ConstantLike>())
70 return true;
71
72 // Clone probe ops, to minimize residue within the design.
73 if (isa<RefSendOp, RefResolveOp, RefCastOp, RefSubOp>(op))
74 return true;
75
76 return false;
77}
78
79//===----------------------------------------------------------------------===//
80// Effectfulness Analysis.
81//===----------------------------------------------------------------------===//
82
/// A table that can determine whether an operation is effectful.
namespace {
class EffectInfo {
public:
  /// Build the table by walking the instance graph post-order, so a module's
  /// effectfulness is recorded before any module that instantiates it is
  /// examined.
  /// NOTE(review): the `circuit` parameter is not used by the constructor
  /// body — kept for interface compatibility.
  EffectInfo(CircuitOp circuit, InstanceGraph &instanceGraph) {
    instanceGraph.walkPostOrder(
        [&](auto &node) { update(node.getModule().getOperation()); });
  }

  /// True if the given operation is NOT moveable due to some effect.
  bool effectful(Operation *op) const {
    // Annotations and don't-touch markers pin the op in place.
    if (!AnnotationSet(op).empty() || hasDontTouch(op))
      return true;
    // A non-droppable name must be preserved where it is.
    if (auto name = dyn_cast<FNamableOp>(op))
      if (!name.hasDroppableName())
        return true;
    // Ops with regions are conservatively treated as effectful.
    if (op->getNumRegions() != 0)
      return true;
    return TypeSwitch<Operation *, bool>(op)
        // An instance is effectful iff any referenced module is effectful.
        .Case<InstanceOp, InstanceChoiceOp>([&](auto op) {
          for (auto module : op.getReferencedModuleNamesAttr())
            if (effectfulModules.contains(cast<StringAttr>(module)))
              return true;
          return false;
        })
        // Hardware declarations and connects are movable, not effects.
        .Case<FConnectLike, WireOp, RegResetOp, RegOp, MemOp, NodeOp>(
            [](auto) { return false; })
        // Otherwise consult MLIR's side-effect interfaces: effect-free,
        // allocate-only, or read-only ops are tolerated.
        .Default([](auto op) {
          return !(mlir::isMemoryEffectFree(op) ||
                   mlir::hasSingleEffect<mlir::MemoryEffects::Allocate>(op) ||
                   mlir::hasSingleEffect<mlir::MemoryEffects::Read>(op));
        });
  }

private:
  /// Record whether the module contains any effectful ops.
  void update(FModuleOp moduleOp) {
    moduleOp.getBodyBlock()->walk([&](Operation *op) {
      if (effectful(op)) {
        markEffectful(moduleOp);
        return WalkResult::interrupt();
      }
      return WalkResult::advance();
    });
  }

  /// Record whether the module-like op contains any effectful op.
  void update(FModuleLike moduleOp) {
    // If the module op has any annotations, then we pretend the module contains
    // some kind of important effect, so that we cannot sink its instances.
    if (auto annos = getAnnotationsIfPresent(moduleOp))
      if (!annos.empty())
        return markEffectful(moduleOp);

    // Port annotations likewise pin the module.
    for (auto annos : moduleOp.getPortAnnotations())
      if (!cast<ArrayAttr>(annos).empty())
        return markEffectful(moduleOp);

    auto *op = moduleOp.getOperation();
    // Regular modules may be pure.
    if (auto m = dyn_cast<FModuleOp>(op))
      return update(m);
    // Memory modules are pure.
    if (auto m = dyn_cast<FMemModuleOp>(op))
      return;
    // All other kinds of modules are effectful.
    // intmodules, extmodules, classes.
    return markEffectful(moduleOp);
  }

  /// Dispatch helper: only module-like ops contribute to the table.
  void update(Operation *op) {
    if (auto moduleOp = dyn_cast<FModuleLike>(op))
      update(moduleOp);
  }

  /// Record that the given module contains an effectful operation.
  void markEffectful(FModuleLike moduleOp) {
    effectfulModules.insert(moduleOp.getModuleNameAttr());
  }

  // Names of modules known to contain (directly or transitively) an effect.
  DenseSet<StringAttr> effectfulModules;
};
} // namespace
166
167//===----------------------------------------------------------------------===//
168// Demands Analysis.
169//===----------------------------------------------------------------------===//
170
171namespace {
172/// The LCA of the blocks in which a value is used/demanded. Lattice value.
173struct Demand {
174 constexpr Demand() : Demand(nullptr) {}
175 constexpr Demand(Block *block) : block(block) {}
176
177 constexpr Demand merge(Demand other) const {
178 if (block == other.block)
179 return Demand(block);
180 if (other.block == nullptr)
181 return Demand(block);
182 if (block == nullptr)
183 return Demand(other.block);
184
185 auto *b = block;
186 while (b && !isAncestor(b, other.block))
187 b = b->getParentOp()->getBlock();
188
189 return Demand(b);
190 }
191
192 bool mergeIn(Demand other) {
193 auto prev = *this;
194 auto next = merge(other);
195 *this = next;
196 return prev != next;
197 }
198
199 constexpr bool operator==(Demand rhs) const { return block == rhs.block; }
200 constexpr bool operator!=(Demand rhs) const { return block != rhs.block; }
201 constexpr operator bool() const { return block; }
202
203 Block *block;
204};
205} // namespace
206
207/// True if this operation is a good site to sink operations.
208static bool isValidDest(Operation *op) {
209 return op && (isa<LayerBlockOp>(op) || isa<FModuleOp>(op));
210}
211
/// True if we are prevented from sinking operations into the regions of the op.
/// Anything that is not itself a valid sink destination acts as a barrier.
static bool isBarrier(Operation *op) { return !isValidDest(op); }
214
/// Adjust the demand based on the location of the op being demanded. Ideally,
/// we can sink an op directly to its site of use. However, there are two
/// issues.
///
/// 1) Typically, an op will dominate every demand, but for hardware
/// declarations such as wires, the declaration will demand any connections
/// driving it. In this case, the relationship is reversed: the demander
/// dominates the demandee. This can cause us to pull connect-like ops up and
/// and out of their enclosing block (ref-defines can be buried under
/// layerblocks). To avoid this, we set an upper bound on the demand: the
/// enclosing block of the demandee.
///
/// 2) not all regions are valid sink targets. If there is a sink-barrier
/// between the operation and its demand, we adjust the demand upwards so that
/// there is no sink barrier between the demandee and the demand site.
static Demand clamp(Operation *op, Demand demand) {
  if (!demand)
    return nullptr;

  auto *upper = op->getBlock();
  assert(upper && "this should not be called on a top-level operation.");

  // (1) Upper bound: the demand may not be above the op's enclosing block.
  if (!isAncestor(upper, demand.block))
    demand = upper;

  // (2) Walk from the demand site up toward the op's block; any barrier op on
  // the way forces the demand out to the barrier's own enclosing block.
  for (auto *i = demand.block; i != upper; i = i->getParentOp()->getBlock())
    if (isBarrier(i->getParentOp()))
      demand = i->getParentOp()->getBlock();

  return demand;
}
246
247namespace {
248struct DemandInfo {
249 using WorkStack = std::vector<Operation *>;
250
251 DemandInfo(const EffectInfo &, FModuleOp);
252
253 Demand getDemandFor(Operation *op) const { return table.lookup(op); }
254
255 /// Starting at effectful ops and output ports, propagate the demand of values
256 /// through the design, running until fixpoint. At the end, we have an
257 /// accurate picture of where every operation can be sunk, while preserving
258 /// effects in the program.
259 void run(const EffectInfo &, FModuleOp, WorkStack &);
260
261 /// Update the demand for the given op. If the demand changes, place the op
262 /// onto the worklist.
263 void update(WorkStack &work, Operation *op, Demand demand) {
264 if (table[op].mergeIn(clamp(op, demand)))
265 work.push_back(op);
266 }
267
268 void update(WorkStack &work, Value value, Demand demand) {
269 if (auto result = dyn_cast<OpResult>(value))
270 update(work, cast<OpResult>(value).getOwner(), demand);
271 }
272
273 void updateConnects(WorkStack &, Value, Demand);
274 void updateConnects(WorkStack &, Operation *, Demand);
275
276 llvm::DenseMap<Operation *, Demand> table;
277};
278} // namespace
279
DemandInfo::DemandInfo(const EffectInfo &effectInfo, FModuleOp moduleOp) {
  WorkStack work;
  Block *body = moduleOp.getBodyBlock();
  // Seed demand for output ports: connects driving them are demanded at the
  // module's top-level block.
  ArrayRef<bool> dirs = moduleOp.getPortDirections();
  for (unsigned i = 0, e = moduleOp.getNumPorts(); i < e; ++i)
    if (direction::get(dirs[i]) == Direction::Out)
      updateConnects(work, body->getArgument(i), moduleOp.getBodyBlock());
  // Seed demand for effectful ops: they must stay in their current block.
  moduleOp.getBodyBlock()->walk([&](Operation *op) {
    if (effectInfo.effectful(op))
      update(work, op, op->getBlock());
  });
  run(effectInfo, moduleOp, work);
}
293
294void DemandInfo::run(const EffectInfo &effectInfo, FModuleOp, WorkStack &work) {
295 while (!work.empty()) {
296 auto *op = work.back();
297 work.pop_back();
298 auto demand = getDemandFor(op);
299 for (auto operand : op->getOperands())
300 update(work, operand, demand);
301 updateConnects(work, op, demand);
302 }
303}
304
// The value represents a hardware declaration, such as a wire or port. Search
// backwards through uses, looking through aliasing operations such as
// subfields, to find connects that drive the given value. All driving
// connects of a value are demanded by the same region as the value. If the
// demand of the connect op is updated, then the demand will propagate
// forwards to its operands through the normal forward-propagation of demand.
void DemandInfo::updateConnects(WorkStack &work, Value value, Demand demand) {
  // Explicit DFS frame: a value and our current position in its use list.
  struct StackElement {
    Value value;
    Value::user_iterator it;
  };

  SmallVector<StackElement> stack;
  stack.push_back({value, value.user_begin()});
  while (!stack.empty()) {
    auto &top = stack.back();
    auto end = top.value.user_end();
    while (true) {
      if (top.it == end) {
        stack.pop_back();
        break;
      }
      auto *user = *(top.it++);
      // A connect whose destination is this value drives it: demand the
      // connect in the same region as the value.
      if (auto connect = dyn_cast<FConnectLike>(user)) {
        if (connect.getDest() == top.value) {
          update(work, connect, demand);
        }
        continue;
      }
      // Aliasing ops: descend into their results. Break out of the inner
      // loop after pushing — push_back may reallocate and invalidate `top`.
      if (isa<SubfieldOp, SubindexOp, SubaccessOp, ObjectSubfieldOp>(user)) {
        for (auto result : user->getResults())
          stack.push_back({result, result.user_begin()});
        break;
      }
    }
  }
}
342
343void DemandInfo::updateConnects(WorkStack &work, Operation *op, Demand demand) {
344 TypeSwitch<Operation *>(op)
345 .Case<WireOp, RegResetOp, RegOp, MemOp, ObjectOp>([&](auto op) {
346 for (auto result : op->getResults())
347 updateConnects(work, result, demand);
348 })
349 .Case<InstanceOp, InstanceChoiceOp>([&](auto op) {
350 for (auto [i, dir] : llvm::enumerate(op.getPortDirections()))
351 if (direction::get(dir) == Direction::In)
352 updateConnects(work, op->getResult(i), demand);
353 });
354}
355
356//===----------------------------------------------------------------------===//
357// ModuleLayerSink
358//===----------------------------------------------------------------------===//
359
namespace {
/// Sinks operations into layer blocks within a single module. Use the static
/// `run` entry point; returns whether the module was modified.
class ModuleLayerSink {
public:
  static bool run(FModuleOp moduleOp, const EffectInfo &effectInfo) {
    return ModuleLayerSink(moduleOp, effectInfo)();
  }

private:
  ModuleLayerSink(FModuleOp moduleOp, const EffectInfo &effectInfo)
      : moduleOp(moduleOp), effectInfo(effectInfo) {}

  /// Perform the transformation; true if anything changed.
  bool operator()();
  /// Move layerblocks (recursively) to the back of their enclosing block so
  /// other ops can be sunk past them.
  void moveLayersToBack(Operation *op);
  void moveLayersToBack(Block *block);

  /// Erase a dead op and record the change.
  void erase(Operation *op);
  /// Sink `op` by cloning it into each block that uses it.
  void sinkOpByCloning(Operation *op);
  /// Sink `op` by moving it to the front of `dest`.
  void sinkOpByMoving(Operation *op, Block *dest);

  FModuleOp moduleOp;
  const EffectInfo &effectInfo;
  // Set as soon as any op is moved, cloned, or erased.
  bool changed = false;
};
} // namespace
384
385// NOLINTNEXTLINE(misc-no-recursion)
386void ModuleLayerSink::moveLayersToBack(Operation *op) {
387 for (auto &r : op->getRegions())
388 for (auto &b : r.getBlocks())
389 moveLayersToBack(&b);
390}
391
/// Move every layerblock in `block` to the back of the block (preserving
/// their relative order), recursing into nested regions along the way.
// NOLINTNEXTLINE(misc-no-recursion)
void ModuleLayerSink::moveLayersToBack(Block *block) {
  auto i = block->rbegin();
  auto e = block->rend();

  // Leave layerblocks that are already "at the back" where they are.
  while (i != e) {
    auto *op = &*i;
    moveLayersToBack(op);
    if (!isa<LayerBlockOp>(op))
      break;
    ++i;
  }

  if (i == e)
    return;

  // We have found a non-layerblock op at position `i`.
  // Any block in front of `i`, will be moved to after `i`.
  auto *ip = &*i;
  while (i != e) {
    auto *op = &*i;
    moveLayersToBack(op);
    if (isa<LayerBlockOp>(op)) {
      // Advance past `op` before moving it, so the reverse iterator stays
      // valid after the list splice.
      ++i;
      op->moveAfter(ip);
      changed = true;
    } else {
      ++i;
    }
  }
}
424
/// Erase `op` and record that the module changed.
void ModuleLayerSink::erase(Operation *op) {
  op->erase();
  changed = true;
}
429
/// Sink `op` by duplicating it into every block that contains a user, then
/// erasing the original if it becomes dead.
void ModuleLayerSink::sinkOpByCloning(Operation *op) {
  // Copy the op down to each block where there is a user.
  // Use a cache to ensure we don't create more than one copy per block.
  auto *src = op->getBlock();
  DenseMap<Block *, Operation *> cache;
  for (unsigned i = 0, e = op->getNumResults(); i < e; ++i) {
    // early_inc_range: `use.set` below mutates the use list being walked.
    for (auto &use : llvm::make_early_inc_range(op->getResult(i).getUses())) {
      auto *dst = use.getOwner()->getBlock();
      if (dst != src) {
        auto &clone = cache[dst];
        if (!clone)
          clone = OpBuilder::atBlockBegin(dst).clone(*op);
        // Result `i` of the clone corresponds to result `i` of the original.
        use.set(clone->getResult(i));
        changed = true;
      }
    }
  }

  // If there are no remaining uses of the original op, erase it.
  if (isOpTriviallyDead(op))
    erase(op);
}
452
453void ModuleLayerSink::sinkOpByMoving(Operation *op, Block *dst) {
454 if (dst != op->getBlock()) {
455 op->moveBefore(dst, dst->begin());
456 changed = true;
457 }
458}
459
460bool ModuleLayerSink::operator()() {
461 moveLayersToBack(moduleOp.getBodyBlock());
462 DemandInfo demandInfo(effectInfo, moduleOp);
463 walkBwd(moduleOp.getBodyBlock(), [&](Operation *op) {
464 auto demand = demandInfo.getDemandFor(op);
465 if (!demand)
466 return erase(op);
467 if (cloneable(op))
468 return sinkOpByCloning(op);
469 sinkOpByMoving(op, demand.block);
470 });
471
472 return changed;
473}
474
475//===----------------------------------------------------------------------===//
476// LayerSinkPass
477//===----------------------------------------------------------------------===//
478
namespace {
/// A control-flow sink pass: sinks ops into layer blocks, module by module.
struct LayerSinkPass final
    : public circt::firrtl::impl::LayerSinkBase<LayerSinkPass> {
  void runOnOperation() override;
};
} // namespace
486
void LayerSinkPass::runOnOperation() {
  // NOTE(review): one line appears to be missing from this listing here
  // (possibly the scoped pass logger from circt/Support/Debug.h) — confirm
  // against the upstream source.

  auto circuit = getOperation();
  LLVM_DEBUG(llvm::dbgs() << "Circuit: '" << circuit.getName() << "'\n");

  // Determine, bottom-up over the instance graph, which modules contain
  // effects; this gates which instances may be sunk.
  auto &instanceGraph = getAnalysis<InstanceGraph>();
  EffectInfo effectInfo(circuit, instanceGraph);

  // Each module is processed independently, so the per-module work can run
  // in parallel.
  std::atomic<bool> changed(false);
  parallelForEach(&getContext(), circuit.getOps<FModuleOp>(),
                  [&](FModuleOp moduleOp) {
                    if (ModuleLayerSink::run(moduleOp, effectInfo))
                      changed = true;
                  });

  // Preserve all analyses if nothing changed; otherwise only the instance
  // graph is declared preserved.
  if (!changed)
    markAllAnalysesPreserved();
  else
    markAnalysesPreserved<InstanceGraph>();
}
assert(baseType &&"element must be base type")
static bool isBarrier(Operation *op)
True if we are prevented from sinking operations into the regions of the op.
static Demand clamp(Operation *op, Demand demand)
Adjust the demand based on the location of the op being demanded.
static bool isAncestor(Block *block, Block *other)
Definition LayerSink.cpp:57
static bool isValidDest(Operation *op)
True if this operation is a good site to sink operations.
static bool cloneable(Operation *op)
Definition LayerSink.cpp:67
static InstancePath empty
#define CIRCT_DEBUG_SCOPED_PASS_LOGGER(PASS)
Definition Debug.h:70
This class provides a read-only projection over the MLIR attributes that represent a set of annotatio...
This graph tracks modules and where they are instantiated.
decltype(auto) walkPostOrder(Fn &&fn)
Perform a post-order walk across the modules.
connect(destination, source)
Definition support.py:39
Direction get(bool isOutput)
Returns an output direction if isOutput is true, otherwise returns an input direction.
Definition CalyxOps.cpp:55
static Direction get(bool isOutput)
Return an output direction if isOutput is true, otherwise return an input direction.
Definition FIRRTLEnums.h:36
Direction
This represents the direction of a single port.
Definition FIRRTLEnums.h:27
bool hasDontTouch(Value value)
Check whether a block argument ("port") or the operation defining a value has a DontTouch annotation,...
ArrayAttr getAnnotationsIfPresent(Operation *op)
static bool operator==(const ModulePort &a, const ModulePort &b)
Definition HWTypes.h:35
The InstanceGraph op interface, see InstanceGraphInterface.td for more details.
bool operator!=(uint64_t a, const FVInt &b)
Definition FVInt.h:685
int run(Type[Generator] generator=CppGenerator, cmdline_args=sys.argv)
Definition codegen.py:440