CIRCT 20.0.0git
Loading...
Searching...
No Matches
AdvancedLayerSink.cpp
Go to the documentation of this file.
1//===- AdvancedLayerSink.cpp - Sink ops into layer blocks -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass sinks operations into layer blocks.
10//
11//===----------------------------------------------------------------------===//
12
16#include "circt/Support/Debug.h"
17#include "mlir/IR/Dominance.h"
18#include "mlir/IR/Threading.h"
19#include "mlir/Interfaces/SideEffectInterfaces.h"
20#include "mlir/Pass/Pass.h"
21#include "mlir/Transforms/ControlFlowSinkUtils.h"
22#include "llvm/ADT/PostOrderIterator.h"
23
24#define DEBUG_TYPE "firrtl-layer-sink"
25
26namespace circt {
27namespace firrtl {
28#define GEN_PASS_DEF_ADVANCEDLAYERSINK
29#include "circt/Dialect/FIRRTL/Passes.h.inc"
30} // namespace firrtl
31} // namespace circt
32
33using namespace circt;
34using namespace firrtl;
35using namespace mlir;
36
37//===----------------------------------------------------------------------===//
38// Helpers
39//===----------------------------------------------------------------------===//
40
41// NOLINTBEGIN(misc-no-recursion)
42namespace {
43/// Walk the ops in `block` bottom-up, back-to-front order. The block walk API
44/// in upstream MLIR, although it takes an Iterator parameter, will always walk
45/// the top block front-to-back. This walk function will actually walk the ops
46/// under `block` back-to-front.
47template <typename T>
48void walkBwd(Block *block, T &&f) {
49 for (auto &op :
50 llvm::make_early_inc_range(llvm::reverse(block->getOperations()))) {
51 for (auto &region : op.getRegions())
52 for (auto &block : region.getBlocks())
53 walkBwd(&block, f);
54 f(&op);
55 }
56}
57} // namespace
58// NOLINTEND(misc-no-recursion)
59
60static bool isAncestor(Block *block, Block *other) {
61 if (block == other)
62 return true;
63 return block->getParent()->isProperAncestor(other->getParent());
64}
65
66//===----------------------------------------------------------------------===//
67// Effectfulness Analysis.
68//===----------------------------------------------------------------------===//
69
70/// A table that can determine whether an operation is effectful.
71namespace {
class EffectInfo {
public:
  /// Build the table by visiting every module reachable in the instance
  /// graph in post-order, so a child module's effectfulness is recorded
  /// before any module that instantiates it is examined.
  EffectInfo(CircuitOp circuit, InstanceGraph &instanceGraph) {
    DenseSet<InstanceGraphNode *> visited;
    for (auto *root : instanceGraph) {
      for (auto *node : llvm::post_order_ext(root, visited)) {
        auto *op = node->getModule().getOperation();
        update(op);
      }
    }
  }

  /// True if the given operation is NOT moveable due to some effect.
  bool effectful(Operation *op) const {
    // Annotated or dont-touched ops must stay where they are.
    if (!AnnotationSet(op).empty() || hasDontTouch(op))
      return true;
    // An op with a meaningful (non-droppable) name is observable.
    if (auto name = dyn_cast<FNamableOp>(op))
      if (!name.hasDroppableName())
        return true;
    if (op->getNumRegions() != 0)
      return true;
    // An instance is effectful iff its target module is effectful.
    if (auto instance = dyn_cast<InstanceOp>(op))
      return effectfulModules.contains(instance.getModuleNameAttr().getAttr());
    // Declarations and connects are handled by the demand analysis; treat
    // them as moveable here.
    if (isa<FConnectLike, WireOp, RegResetOp, RegOp, MemOp, NodeOp>(op))
      return false;
    // Otherwise, fall back to MLIR's side-effect interfaces: allocate-only
    // and read-only ops are still moveable.
    return !(mlir::isMemoryEffectFree(op) ||
             mlir::hasSingleEffect<mlir::MemoryEffects::Allocate>(op) ||
             mlir::hasSingleEffect<mlir::MemoryEffects::Read>(op));
  }

private:
  /// Record whether the module contains any effectful ops.
  void update(FModuleOp moduleOp) {
    moduleOp.getBodyBlock()->walk([&](Operation *op) {
      if (effectful(op)) {
        markEffectful(moduleOp);
        // One effectful op is enough; stop walking this module.
        return WalkResult::interrupt();
      }
      return WalkResult::advance();
    });
  }

  /// Record whether the module-like op contains any effectful op.
  void update(FModuleLike moduleOp) {
    // If the module op has any annotations, then we pretend the module contains
    // some kind of important effect, so that we cannot sink its instances.
    if (auto annos = getAnnotationsIfPresent(moduleOp))
      if (!annos.empty())
        return markEffectful(moduleOp);

    // Likewise for annotations on any port.
    for (auto annos : moduleOp.getPortAnnotations())
      if (!cast<ArrayAttr>(annos).empty())
        return markEffectful(moduleOp);

    auto *op = moduleOp.getOperation();
    // Regular modules may be pure.
    if (auto m = dyn_cast<FModuleOp>(op))
      return update(m);
    // Memory modules are pure.
    if (auto m = dyn_cast<FMemModuleOp>(op))
      return;
    // All other kinds of modules are effectful.
    // intmodules, extmodules, classes.
    return markEffectful(moduleOp);
  }

  /// Dispatch helper: only module-like ops are analyzed.
  void update(Operation *op) {
    if (auto moduleOp = dyn_cast<FModuleLike>(op))
      update(moduleOp);
  }

  /// Record that the given module contains an effectful operation.
  void markEffectful(FModuleLike moduleOp) {
    effectfulModules.insert(moduleOp.getModuleNameAttr());
  }

  // Names of modules known to contain (or be) an effect.
  DenseSet<StringAttr> effectfulModules;
};
150} // namespace
151
152//===----------------------------------------------------------------------===//
153// Demands Analysis.
154//===----------------------------------------------------------------------===//
155
156namespace {
157/// The LCA of the blocks in which a value is used/demanded. Lattice value.
158struct Demand {
159 constexpr Demand() : Demand(nullptr) {}
160 constexpr Demand(Block *block) : block(block) {}
161
162 constexpr Demand merge(Demand other) const {
163 if (block == other.block)
164 return Demand(block);
165 if (other.block == nullptr)
166 return Demand(block);
167 if (block == nullptr)
168 return Demand(other.block);
169
170 auto *b = block;
171 while (b && !isAncestor(b, other.block))
172 b = b->getParentOp()->getBlock();
173
174 return Demand(b);
175 }
176
177 bool mergeIn(Demand other) {
178 auto prev = *this;
179 auto next = merge(other);
180 *this = next;
181 return prev != next;
182 }
183
184 constexpr bool operator==(Demand rhs) const { return block == rhs.block; }
185 constexpr bool operator!=(Demand rhs) const { return block != rhs.block; }
186 constexpr operator bool() const { return block; }
187
188 Block *block;
189};
190} // namespace
191
192/// True if this operation is a good site to sink operations.
193static bool isValidDest(Operation *op) {
194 return op && (isa<LayerBlockOp>(op) || isa<FModuleOp>(op));
195}
196
/// True if we are prevented from sinking operations into the regions of the op.
/// An op is a barrier exactly when it is not itself a valid sink destination.
static bool isBarrier(Operation *op) { return !isValidDest(op); }
199
200/// Adjust the demand based on the location of the op being demanded. Ideally,
201/// we can sink an op directly to its site of use. However, there are two
202/// issues.
203///
204/// 1) Typically, an op will dominate every demand, but for hardware
205/// declarations such as wires, the declaration will demand any connections
206/// driving it. In this case, the relationship is reversed: the demander
207/// dominates the demandee. This can cause us to pull connect-like ops up and
208/// and out of their enclosing block (ref-defines can be buried under
209/// layerblocks). To avoid this, we set an upper bound on the demand: the
210/// enclosing block of the demandee.
211///
212/// 2) not all regions are valid sink targets. If there is a sink-barrier
213/// between the operation and its demand, we adjust the demand upwards so that
214/// there is no sink barrier between the demandee and the demand site.
215static Demand clamp(Operation *op, Demand demand) {
216 if (!demand)
217 return nullptr;
218
219 auto *upper = op->getBlock();
220 assert(upper && "this should not be called on a top-level operation.");
221
222 if (!isAncestor(upper, demand.block))
223 demand = upper;
224
225 for (auto *i = demand.block; i != upper; i = i->getParentOp()->getBlock())
226 if (isBarrier(i->getParentOp()))
227 demand = i->getParentOp()->getBlock();
228
229 return demand;
230}
231
232namespace {
233struct DemandInfo {
234 using WorkStack = std::vector<Operation *>;
235
236 DemandInfo(const EffectInfo &, FModuleOp);
237
238 Demand getDemandFor(Operation *op) const { return table.lookup(op); }
239
240 /// Starting at effectful ops and output ports, propagate the demand of values
241 /// through the design, running until fixpoint. At the end, we have an
242 /// accurate picture of where every operation can be sunk, while preserving
243 /// effects in the program.
244 void run(const EffectInfo &, FModuleOp, WorkStack &);
245
246 /// Update the demand for the given op. If the demand changes, place the op
247 /// onto the worklist.
248 void update(WorkStack &work, Operation *op, Demand demand) {
249 if (table[op].mergeIn(clamp(op, demand)))
250 work.push_back(op);
251 }
252
253 void update(WorkStack &work, Value value, Demand demand) {
254 if (auto result = dyn_cast<OpResult>(value))
255 update(work, cast<OpResult>(value).getOwner(), demand);
256 }
257
258 void updateConnects(WorkStack &, Value, Demand);
259 void updateConnects(WorkStack &, Operation *, Demand);
260
261 llvm::DenseMap<Operation *, Demand> table;
262};
263} // namespace
264
265DemandInfo::DemandInfo(const EffectInfo &effectInfo, FModuleOp moduleOp) {
266 WorkStack work;
267 Block *body = moduleOp.getBodyBlock();
268 ArrayRef<bool> dirs = moduleOp.getPortDirections();
269 for (unsigned i = 0, e = moduleOp.getNumPorts(); i < e; ++i)
270 if (direction::get(dirs[i]) == Direction::Out)
271 updateConnects(work, body->getArgument(i), moduleOp.getBodyBlock());
272 moduleOp.getBodyBlock()->walk([&](Operation *op) {
273 if (effectInfo.effectful(op))
274 update(work, op, op->getBlock());
275 });
276 run(effectInfo, moduleOp, work);
277}
278
279void DemandInfo::run(const EffectInfo &effectInfo, FModuleOp, WorkStack &work) {
280 while (!work.empty()) {
281 auto *op = work.back();
282 work.pop_back();
283 auto demand = getDemandFor(op);
284 for (auto operand : op->getOperands())
285 update(work, operand, demand);
286 updateConnects(work, op, demand);
287 }
288}
289
290// The value represents a hardware declaration, such as a wire or port. Search
291// backwards through uses, looking through aliasing operations such as
292// subfields, to find connects that drive the given value. All driving
293// connects of a value are demanded by the same region as the value. If the
294// demand of the connect op is updated, then the demand will propagate
295// forwards to its operands through the normal forward-propagation of demand.
void DemandInfo::updateConnects(WorkStack &work, Value value, Demand demand) {
  // One frame of the explicit DFS over use-lists: the value being scanned
  // and the next user of it to visit.
  struct StackElement {
    Value value;
    Value::user_iterator it;
  };

  SmallVector<StackElement> stack;
  stack.push_back({value, value.user_begin()});
  while (!stack.empty()) {
    // NOTE: `top` is a reference into `stack`; pushing onto `stack` below
    // may reallocate and invalidate it, which is why those paths `break`
    // back to the outer loop to re-acquire the top element.
    auto &top = stack.back();
    auto end = top.value.user_end();
    while (true) {
      if (top.it == end) {
        stack.pop_back();
        break;
      }
      // Advance past `user` before examining it.
      auto *user = *(top.it++);
      // A connect whose destination is this value drives it, so the connect
      // is demanded wherever the value is demanded.
      if (auto connect = dyn_cast<FConnectLike>(user)) {
        if (connect.getDest() == top.value) {
          update(work, connect, demand);
        }
        continue;
      }
      // Field-access ops alias the declaration: descend into the users of
      // their results.
      if (isa<SubfieldOp, SubindexOp, SubaccessOp, ObjectSubfieldOp>(user)) {
        for (auto result : user->getResults())
          stack.push_back({result, result.user_begin()});
        break;
      }
    }
  }
}
327
328void DemandInfo::updateConnects(WorkStack &work, Operation *op, Demand demand) {
329 if (isa<WireOp, RegResetOp, RegOp, MemOp, ObjectOp>(op)) {
330 for (auto result : op->getResults())
331 updateConnects(work, result, demand);
332 } else if (auto inst = dyn_cast<InstanceOp>(op)) {
333 auto dirs = inst.getPortDirections();
334 for (unsigned i = 0, e = inst->getNumResults(); i < e; ++i) {
335 if (direction::get(dirs[i]) == Direction::In)
336 updateConnects(work, inst.getResult(i), demand);
337 }
338 }
339}
340
341//===----------------------------------------------------------------------===//
342// ModuleLayerSink
343//===----------------------------------------------------------------------===//
344
345namespace {
/// Sinks ops into layer blocks within a single module.
class ModuleLayerSink {
public:
  /// Run layer sinking on `moduleOp`. Returns true if the module changed.
  static bool run(FModuleOp moduleOp, const EffectInfo &effectInfo) {
    return ModuleLayerSink(moduleOp, effectInfo)();
  }

private:
  ModuleLayerSink(FModuleOp moduleOp, const EffectInfo &effectInfo)
      : moduleOp(moduleOp), effectInfo(effectInfo) {}

  /// Perform the transformation; returns true if anything changed.
  bool operator()();
  /// Recursively normalize every block nested under `op`.
  void moveLayersToBack(Operation *op);
  /// Move all layerblocks in `block` to the back of the block.
  void moveLayersToBack(Block *block);

  FModuleOp moduleOp;
  const EffectInfo &effectInfo;
  // Set when any op is erased or moved.
  bool changed = false;
};
364} // namespace
365
366// NOLINTNEXTLINE(misc-no-recursion)
367void ModuleLayerSink::moveLayersToBack(Operation *op) {
368 for (auto &r : op->getRegions())
369 for (auto &b : r.getBlocks())
370 moveLayersToBack(&b);
371}
372
// NOLINTNEXTLINE(misc-no-recursion)
void ModuleLayerSink::moveLayersToBack(Block *block) {
  auto i = block->rbegin();
  auto e = block->rend();

  // Leave layerblocks that are already "at the back" where they are.
  while (i != e) {
    auto *op = &*i;
    // Recurse first so nested regions are normalized regardless of op kind.
    moveLayersToBack(op);
    if (!isa<LayerBlockOp>(op))
      break;
    ++i;
  }

  // Every op was a layerblock: nothing to move.
  if (i == e)
    return;

  // We have found a non-layerblock op at position `i`.
  // Any block in front of `i`, will be moved to after `i`.
  auto *ip = &*i;
  while (i != e) {
    auto *op = &*i;
    moveLayersToBack(op);
    if (isa<LayerBlockOp>(op)) {
      // Advance the reverse iterator past `op` BEFORE moving it, so the
      // splice does not disturb our position in the list.
      ++i;
      op->moveAfter(ip);
      changed = true;
    } else {
      ++i;
    }
  }
}
405
406bool ModuleLayerSink::operator()() {
407 moveLayersToBack(moduleOp.getBodyBlock());
408 DemandInfo demandInfo(effectInfo, moduleOp);
409 walkBwd(moduleOp.getBodyBlock(), [&](Operation *op) {
410 auto demand = demandInfo.getDemandFor(op);
411 if (!demand) {
412 op->erase();
413 changed = true;
414 return;
415 }
416
417 if (demand == op->getBlock())
418 return;
419
420 op->moveBefore(demand.block, demand.block->begin());
421 changed = true;
422 });
423
424 return changed;
425}
426
427//===----------------------------------------------------------------------===//
428// LayerSinkPass
429//===----------------------------------------------------------------------===//
430
431namespace {
/// A control-flow sink pass. Runs ModuleLayerSink over every module of the
/// circuit (in parallel across modules).
struct AdvancedLayerSinkPass final
    : public circt::firrtl::impl::AdvancedLayerSinkBase<AdvancedLayerSinkPass> {
  void runOnOperation() override;
};
437} // namespace
438
439void AdvancedLayerSinkPass::runOnOperation() {
440 auto circuit = getOperation();
441 LLVM_DEBUG(debugPassHeader(this)
442 << "\n"
443 << "Circuit: '" << circuit.getName() << "'\n";);
444 auto &instanceGraph = getAnalysis<InstanceGraph>();
445 EffectInfo effectInfo(circuit, instanceGraph);
446
447 std::atomic<bool> changed(false);
448 parallelForEach(&getContext(), circuit.getOps<FModuleOp>(),
449 [&](FModuleOp moduleOp) {
450 if (ModuleLayerSink::run(moduleOp, effectInfo))
451 changed = true;
452 });
453
454 if (!changed)
455 markAllAnalysesPreserved();
456 else
457 markAnalysesPreserved<InstanceGraph>();
458}
459
460//===----------------------------------------------------------------------===//
461// Pass Constructor
462//===----------------------------------------------------------------------===//
463
/// Construct an instance of the AdvancedLayerSink pass.
std::unique_ptr<mlir::Pass> circt::firrtl::createAdvancedLayerSinkPass() {
  return std::make_unique<AdvancedLayerSinkPass>();
}
static bool isBarrier(Operation *op)
True if we are prevented from sinking operations into the regions of the op.
static Demand clamp(Operation *op, Demand demand)
Adjust the demand based on the location of the op being demanded.
static bool isAncestor(Block *block, Block *other)
static bool isValidDest(Operation *op)
True if this operation is a good site to sink operations.
assert(baseType &&"element must be base type")
static InstancePath empty
This class provides a read-only projection over the MLIR attributes that represent a set of annotatio...
This graph tracks modules and where they are instantiated.
connect(destination, source)
Definition support.py:39
static Direction get(bool isOutput)
Return an output direction if isOutput is true, otherwise return an input direction.
bool hasDontTouch(Value value)
Check whether a block argument ("port") or the operation defining a value has a DontTouch annotation,...
std::unique_ptr< mlir::Pass > createAdvancedLayerSinkPass()
ArrayAttr getAnnotationsIfPresent(Operation *op)
static bool operator==(const ModulePort &a, const ModulePort &b)
Definition HWTypes.h:35
The InstanceGraph op interface, see InstanceGraphInterface.td for more details.
bool operator!=(uint64_t a, const FVInt &b)
Definition FVInt.h:651
llvm::raw_ostream & debugPassHeader(const mlir::Pass *pass, int width=80)
Write a boilerplate header for a pass to the debug stream.
Definition Debug.cpp:31
int run(Type[Generator] generator=CppGenerator, cmdline_args=sys.argv)
Definition codegen.py:121