CIRCT 21.0.0git
AdvancedLayerSink.cpp
//===- AdvancedLayerSink.cpp - Sink ops into layer blocks -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass sinks operations into layer blocks.
//
//===----------------------------------------------------------------------===//
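//
// An illustrative sketch (hypothetical IR, invented for exposition; types and
// details omitted): an op whose only users live inside a layerblock is moved
// into that block.
//
//   %n = firrtl.node %x                    firrtl.layerblock @A {
//   firrtl.layerblock @A {        ==>        %n = firrtl.node %x
//     %w = firrtl.wire                       %w = firrtl.wire
//     firrtl.connect %w, %n                  firrtl.connect %w, %n
//   }                                      }
//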

#include "circt/Dialect/FIRRTL/FIRRTLInstanceGraph.h"
#include "circt/Dialect/FIRRTL/FIRRTLOps.h"
#include "circt/Dialect/FIRRTL/Passes.h"
#include "circt/Support/Debug.h"
#include "mlir/IR/Dominance.h"
#include "mlir/IR/Threading.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/ControlFlowSinkUtils.h"
#include "llvm/ADT/PostOrderIterator.h"

#define DEBUG_TYPE "firrtl-layer-sink"

namespace circt {
namespace firrtl {
#define GEN_PASS_DEF_ADVANCEDLAYERSINK
#include "circt/Dialect/FIRRTL/Passes.h.inc"
} // namespace firrtl
} // namespace circt

using namespace circt;
using namespace firrtl;
using namespace mlir;

//===----------------------------------------------------------------------===//
// Helpers
//===----------------------------------------------------------------------===//

// NOLINTBEGIN(misc-no-recursion)
namespace {
/// Walk the ops under `block` bottom-up, in back-to-front order. The block
/// walk API in upstream MLIR, although it takes an Iterator parameter, always
/// walks the top block front-to-back. This walk function actually visits the
/// ops under `block` back-to-front.
template <typename T>
void walkBwd(Block *block, T &&f) {
  for (auto &op :
       llvm::make_early_inc_range(llvm::reverse(block->getOperations()))) {
    for (auto &region : op.getRegions())
      for (auto &block : region.getBlocks())
        walkBwd(&block, f);
    f(&op);
  }
}
} // namespace
// NOLINTEND(misc-no-recursion)
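
// For illustration (hypothetical ops): given a block [A, B] where B holds a
// nested block [C, D], walkBwd recurses into regions before applying `f` and
// walks each block in reverse, so `f` runs in the order D, C, B, A. Within a
// block, users are thus visited before the earlier ops that define their
// operands.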

static bool isAncestor(Block *block, Block *other) {
  if (block == other)
    return true;
  return block->getParent()->isProperAncestor(other->getParent());
}

//===----------------------------------------------------------------------===//
// Clone-ability.
//===----------------------------------------------------------------------===//

static bool cloneable(Operation *op) {
  return op->hasTrait<OpTrait::ConstantLike>();
}

//===----------------------------------------------------------------------===//
// Effectfulness Analysis.
//===----------------------------------------------------------------------===//

namespace {
/// A table that can determine whether an operation is effectful.
class EffectInfo {
public:
  EffectInfo(CircuitOp circuit, InstanceGraph &instanceGraph) {
    DenseSet<InstanceGraphNode *> visited;
    for (auto *root : instanceGraph) {
      for (auto *node : llvm::post_order_ext(root, visited)) {
        auto *op = node->getModule().getOperation();
        update(op);
      }
    }
  }

  /// True if the given operation is NOT moveable due to some effect.
  bool effectful(Operation *op) const {
    if (!AnnotationSet(op).empty() || hasDontTouch(op))
      return true;
    if (auto name = dyn_cast<FNamableOp>(op))
      if (!name.hasDroppableName())
        return true;
    if (op->getNumRegions() != 0)
      return true;
    if (auto instance = dyn_cast<InstanceOp>(op))
      return effectfulModules.contains(instance.getModuleNameAttr().getAttr());
    if (isa<FConnectLike, WireOp, RegResetOp, RegOp, MemOp, NodeOp>(op))
      return false;
    return !(mlir::isMemoryEffectFree(op) ||
             mlir::hasSingleEffect<mlir::MemoryEffects::Allocate>(op) ||
             mlir::hasSingleEffect<mlir::MemoryEffects::Read>(op));
  }
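
  // For illustration: under the rules above, a node carrying an annotation or
  // a non-droppable name is pinned in place, an unannotated wire or connect is
  // freely sinkable, and an op with write-like effects (a print, for example)
  // is treated as effectful and never moved.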

private:
  /// Record whether the module contains any effectful ops.
  void update(FModuleOp moduleOp) {
    moduleOp.getBodyBlock()->walk([&](Operation *op) {
      if (effectful(op)) {
        markEffectful(moduleOp);
        return WalkResult::interrupt();
      }
      return WalkResult::advance();
    });
  }

  /// Record whether the module-like op contains any effectful op.
  void update(FModuleLike moduleOp) {
    // If the module op has any annotations, pretend the module contains some
    // kind of important effect, so that we cannot sink its instances.
    if (auto annos = getAnnotationsIfPresent(moduleOp))
      if (!annos.empty())
        return markEffectful(moduleOp);

    for (auto annos : moduleOp.getPortAnnotations())
      if (!cast<ArrayAttr>(annos).empty())
        return markEffectful(moduleOp);

    auto *op = moduleOp.getOperation();
    // Regular modules may be pure.
    if (auto m = dyn_cast<FModuleOp>(op))
      return update(m);
    // Memory modules are pure.
    if (isa<FMemModuleOp>(op))
      return;
    // All other kinds of modules (intmodules, extmodules, classes) are
    // effectful.
    return markEffectful(moduleOp);
  }

  void update(Operation *op) {
    if (auto moduleOp = dyn_cast<FModuleLike>(op))
      update(moduleOp);
  }

  /// Record that the given module contains an effectful operation.
  void markEffectful(FModuleLike moduleOp) {
    effectfulModules.insert(moduleOp.getModuleNameAttr());
  }

  DenseSet<StringAttr> effectfulModules;
};
} // namespace

//===----------------------------------------------------------------------===//
// Demands Analysis.
//===----------------------------------------------------------------------===//

namespace {
/// A lattice value: the least common ancestor (LCA) of the blocks in which a
/// value is used/demanded.
struct Demand {
  constexpr Demand() : Demand(nullptr) {}
  constexpr Demand(Block *block) : block(block) {}

  constexpr Demand merge(Demand other) const {
    if (block == other.block)
      return Demand(block);
    if (other.block == nullptr)
      return Demand(block);
    if (block == nullptr)
      return Demand(other.block);

    auto *b = block;
    while (b && !isAncestor(b, other.block))
      b = b->getParentOp()->getBlock();

    return Demand(b);
  }

  bool mergeIn(Demand other) {
    auto prev = *this;
    auto next = merge(other);
    *this = next;
    return prev != next;
  }

  constexpr bool operator==(Demand rhs) const { return block == rhs.block; }
  constexpr bool operator!=(Demand rhs) const { return block != rhs.block; }
  constexpr operator bool() const { return block; }

  Block *block;
};
} // namespace
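
// For illustration (hypothetical structure): if a value is demanded from two
// sibling layerblocks @A and @B in a module body, merge() walks up from @A's
// block until it reaches an ancestor of @B's block, yielding the module body
// as the LCA. The op can then sink no deeper than the module body, the
// deepest block that still encloses both uses.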

/// True if this operation is a good site to sink operations.
static bool isValidDest(Operation *op) {
  return op && isa<LayerBlockOp, FModuleOp>(op);
}

/// True if we are prevented from sinking operations into the regions of the
/// op.
static bool isBarrier(Operation *op) { return !isValidDest(op); }

/// Adjust the demand based on the location of the op being demanded. Ideally,
/// we could sink an op directly to its site of use, but there are two issues.
///
/// 1) Typically, an op will dominate every demand, but for hardware
/// declarations such as wires, the declaration will demand any connections
/// driving it. In this case, the relationship is reversed: the demander
/// dominates the demandee. This can cause us to pull connect-like ops up and
/// out of their enclosing block (ref-defines can be buried under layerblocks).
/// To avoid this, we set an upper bound on the demand: the enclosing block of
/// the demandee.
///
/// 2) Not all regions are valid sink targets. If there is a sink-barrier
/// between the operation and its demand, we adjust the demand upwards so that
/// there is no sink barrier between the demandee and the demand site.
static Demand clamp(Operation *op, Demand demand) {
  if (!demand)
    return nullptr;

  auto *upper = op->getBlock();
  assert(upper && "this should not be called on a top-level operation.");

  if (!isAncestor(upper, demand.block))
    demand = upper;

  for (auto *i = demand.block; i != upper; i = i->getParentOp()->getBlock())
    if (isBarrier(i->getParentOp()))
      demand = i->getParentOp()->getBlock();

  return demand;
}
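
// Sketch of adjustment (1) above (hypothetical IR): a ref.define buried under
// a layerblock may drive a wire declared in the module body. The wire demands
// the define at the module body, but the define's own enclosing block is the
// layerblock, so the demand is clamped to the layerblock and the define stays
// put.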

namespace {
struct DemandInfo {
  using WorkStack = std::vector<Operation *>;

  DemandInfo(const EffectInfo &, FModuleOp);

  Demand getDemandFor(Operation *op) const { return table.lookup(op); }

  /// Starting at effectful ops and output ports, propagate the demand of
  /// values through the design, running until fixpoint. At the end, we have
  /// an accurate picture of where every operation can be sunk, while
  /// preserving effects in the program.
  void run(const EffectInfo &, FModuleOp, WorkStack &);

  /// Update the demand for the given op. If the demand changes, place the op
  /// onto the worklist.
  void update(WorkStack &work, Operation *op, Demand demand) {
    if (table[op].mergeIn(clamp(op, demand)))
      work.push_back(op);
  }

  void update(WorkStack &work, Value value, Demand demand) {
    if (auto result = dyn_cast<OpResult>(value))
      update(work, result.getOwner(), demand);
  }

  void updateConnects(WorkStack &, Value, Demand);
  void updateConnects(WorkStack &, Operation *, Demand);

  llvm::DenseMap<Operation *, Demand> table;
};
} // namespace

DemandInfo::DemandInfo(const EffectInfo &effectInfo, FModuleOp moduleOp) {
  WorkStack work;
  Block *body = moduleOp.getBodyBlock();
  ArrayRef<bool> dirs = moduleOp.getPortDirections();
  for (unsigned i = 0, e = moduleOp.getNumPorts(); i < e; ++i)
    if (direction::get(dirs[i]) == Direction::Out)
      updateConnects(work, body->getArgument(i), moduleOp.getBodyBlock());
  moduleOp.getBodyBlock()->walk([&](Operation *op) {
    if (effectInfo.effectful(op))
      update(work, op, op->getBlock());
  });
  run(effectInfo, moduleOp, work);
}

void DemandInfo::run(const EffectInfo &effectInfo, FModuleOp, WorkStack &work) {
  while (!work.empty()) {
    auto *op = work.back();
    work.pop_back();
    auto demand = getDemandFor(op);
    for (auto operand : op->getOperands())
      update(work, operand, demand);
    updateConnects(work, op, demand);
  }
}
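
// For illustration: an effectful op seeds the worklist demanding itself at
// its own block; each pop then demands the op's operands (and any connects
// driving it) at the same clamped block. At fixpoint, every op transitively
// feeding an effect or an output port has a demand, and an op with no demand
// is dead and may be erased.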

// The value represents a hardware declaration, such as a wire or port. Search
// backwards through uses, looking through aliasing operations such as
// subfields, to find connects that drive the given value. All driving
// connects of a value are demanded by the same region as the value. If the
// demand of the connect op is updated, then the demand will propagate
// forwards to its operands through the normal forward-propagation of demand.
void DemandInfo::updateConnects(WorkStack &work, Value value, Demand demand) {
  struct StackElement {
    Value value;
    Value::user_iterator it;
  };

  SmallVector<StackElement> stack;
  stack.push_back({value, value.user_begin()});
  while (!stack.empty()) {
    auto &top = stack.back();
    auto end = top.value.user_end();
    while (true) {
      if (top.it == end) {
        stack.pop_back();
        break;
      }
      auto *user = *(top.it++);
      if (auto connect = dyn_cast<FConnectLike>(user)) {
        if (connect.getDest() == top.value) {
          update(work, connect, demand);
        }
        continue;
      }
      if (isa<SubfieldOp, SubindexOp, SubaccessOp, ObjectSubfieldOp>(user)) {
        for (auto result : user->getResults())
          stack.push_back({result, result.user_begin()});
        break;
      }
    }
  }
}
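
// For illustration (hypothetical IR): given a bundle-typed wire %w, a driver
// such as
//
//   %f = firrtl.subfield %w[a]
//   firrtl.connect %f, %x
//
// is found by following %w's users through the subfield; the connect is then
// demanded in the same block as %w.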

void DemandInfo::updateConnects(WorkStack &work, Operation *op, Demand demand) {
  if (isa<WireOp, RegResetOp, RegOp, MemOp, ObjectOp>(op)) {
    for (auto result : op->getResults())
      updateConnects(work, result, demand);
  } else if (auto inst = dyn_cast<InstanceOp>(op)) {
    auto dirs = inst.getPortDirections();
    for (unsigned i = 0, e = inst->getNumResults(); i < e; ++i) {
      if (direction::get(dirs[i]) == Direction::In)
        updateConnects(work, inst.getResult(i), demand);
    }
  }
}

//===----------------------------------------------------------------------===//
// ModuleLayerSink
//===----------------------------------------------------------------------===//

namespace {
class ModuleLayerSink {
public:
  static bool run(FModuleOp moduleOp, const EffectInfo &effectInfo) {
    return ModuleLayerSink(moduleOp, effectInfo)();
  }

private:
  ModuleLayerSink(FModuleOp moduleOp, const EffectInfo &effectInfo)
      : moduleOp(moduleOp), effectInfo(effectInfo) {}

  bool operator()();
  void moveLayersToBack(Operation *op);
  void moveLayersToBack(Block *block);

  void erase(Operation *op);
  void sinkOpByCloning(Operation *op);
  void sinkOpByMoving(Operation *op, Block *dest);

  FModuleOp moduleOp;
  const EffectInfo &effectInfo;
  bool changed = false;
};
} // namespace

// NOLINTNEXTLINE(misc-no-recursion)
void ModuleLayerSink::moveLayersToBack(Operation *op) {
  for (auto &r : op->getRegions())
    for (auto &b : r.getBlocks())
      moveLayersToBack(&b);
}

// NOLINTNEXTLINE(misc-no-recursion)
void ModuleLayerSink::moveLayersToBack(Block *block) {
  auto i = block->rbegin();
  auto e = block->rend();

  // Leave layerblocks that are already "at the back" where they are.
  while (i != e) {
    auto *op = &*i;
    moveLayersToBack(op);
    if (!isa<LayerBlockOp>(op))
      break;
    ++i;
  }

  if (i == e)
    return;

  // We have found a non-layerblock op at position `i`. Any layerblock in
  // front of `i` will be moved to after `i`.
  auto *ip = &*i;
  while (i != e) {
    auto *op = &*i;
    moveLayersToBack(op);
    if (isa<LayerBlockOp>(op)) {
      ++i;
      op->moveAfter(ip);
      changed = true;
    } else {
      ++i;
    }
  }
}
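
// For illustration: a module body ordered [%a, layerblock @A, %b,
// layerblock @B] is reordered to [%a, %b, layerblock @A, layerblock @B],
// leaving every layerblock at the back of its enclosing block.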

void ModuleLayerSink::erase(Operation *op) {
  op->erase();
  changed = true;
}

void ModuleLayerSink::sinkOpByCloning(Operation *op) {
  // Copy the op down to each block where there is a user.
  // Use a cache to ensure we don't create more than one copy per block.
  auto *src = op->getBlock();
  DenseMap<Block *, Operation *> cache;
  for (unsigned i = 0, e = op->getNumResults(); i < e; ++i) {
    for (auto &use : llvm::make_early_inc_range(op->getResult(i).getUses())) {
      auto *dst = use.getOwner()->getBlock();
      if (dst != src) {
        auto &clone = cache[dst];
        if (!clone)
          clone = OpBuilder::atBlockBegin(dst).clone(*op);
        use.set(clone->getResult(i));
        changed = true;
      }
    }
  }

  // If there are no remaining uses of the original op, erase it.
  if (isOpTriviallyDead(op))
    erase(op);
}
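
// For illustration (hypothetical IR): a constant demanded from two
// layerblocks,
//
//   %c = firrtl.constant 1
//   firrtl.layerblock @A { ... uses %c ... }
//   firrtl.layerblock @B { ... uses %c ... }
//
// is cloned to the front of each layerblock body, and the original constant
// is erased once it is trivially dead.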

void ModuleLayerSink::sinkOpByMoving(Operation *op, Block *dst) {
  if (dst != op->getBlock()) {
    op->moveBefore(dst, dst->begin());
    changed = true;
  }
}

bool ModuleLayerSink::operator()() {
  moveLayersToBack(moduleOp.getBodyBlock());
  DemandInfo demandInfo(effectInfo, moduleOp);
  walkBwd(moduleOp.getBodyBlock(), [&](Operation *op) {
    auto demand = demandInfo.getDemandFor(op);
    if (!demand)
      return erase(op);
    if (cloneable(op))
      return sinkOpByCloning(op);
    sinkOpByMoving(op, demand.block);
  });

  return changed;
}

//===----------------------------------------------------------------------===//
// LayerSinkPass
//===----------------------------------------------------------------------===//

namespace {
/// A control-flow sink pass.
struct AdvancedLayerSinkPass final
    : public circt::firrtl::impl::AdvancedLayerSinkBase<AdvancedLayerSinkPass> {
  void runOnOperation() override;
};
} // namespace

void AdvancedLayerSinkPass::runOnOperation() {
  auto circuit = getOperation();
  LLVM_DEBUG(debugPassHeader(this)
             << "\n"
             << "Circuit: '" << circuit.getName() << "'\n";);
  auto &instanceGraph = getAnalysis<InstanceGraph>();
  EffectInfo effectInfo(circuit, instanceGraph);

  std::atomic<bool> changed(false);
  parallelForEach(&getContext(), circuit.getOps<FModuleOp>(),
                  [&](FModuleOp moduleOp) {
                    if (ModuleLayerSink::run(moduleOp, effectInfo))
                      changed = true;
                  });

  if (!changed)
    markAllAnalysesPreserved();
  else
    markAnalysesPreserved<InstanceGraph>();
}

//===----------------------------------------------------------------------===//
// Pass Constructor
//===----------------------------------------------------------------------===//

std::unique_ptr<mlir::Pass> circt::firrtl::createAdvancedLayerSinkPass() {
  return std::make_unique<AdvancedLayerSinkPass>();
}