PrettifyVerilog.cpp
//===- PrettifyVerilog.cpp - Transformations to improve Verilog quality ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass contains elective transformations that improve the quality of
// SystemVerilog generated by the ExportVerilog library. This pass is not
// compulsory: things that are required for ExportVerilog to be correct should
// be included as part of the ExportVerilog pass itself to make sure it is
// self-contained. This allows the ExportVerilog pass to be simpler.
//
// PrettifyVerilog is run prior to Verilog emission but must be aware of the
// options in LoweringOptions. It shouldn't introduce invalid constructs that
// aren't present in the IR already: this isn't a general "raising" pass.
//===----------------------------------------------------------------------===//

#include "circt/Dialect/Comb/CombOps.h"
#include "circt/Dialect/HW/HWOps.h"
#include "circt/Dialect/SV/SVOps.h"
#include "circt/Dialect/SV/SVPasses.h"
#include "circt/Support/LoweringOptions.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "llvm/ADT/TypeSwitch.h"

namespace circt {
namespace sv {
#define GEN_PASS_DEF_PRETTIFYVERILOG
#include "circt/Dialect/SV/SVPasses.h.inc"
} // namespace sv
} // namespace circt

using namespace circt;
//===----------------------------------------------------------------------===//
// PrettifyVerilogPass
//===----------------------------------------------------------------------===//

namespace {
struct PrettifyVerilogPass
    : public circt::sv::impl::PrettifyVerilogBase<PrettifyVerilogPass> {
  void runOnOperation() override;

private:
  void processPostOrder(Block &block);
  bool prettifyUnaryOperator(Operation *op);
  void sinkOrCloneOpToUses(Operation *op);
  void sinkExpression(Operation *op);
  void useNamedOperands(Operation *op, DenseMap<Value, Operation *> &pipeMap);

  bool splitStructAssignment(OpBuilder &builder, hw::StructType ty, Value dst,
                             Value src);
  bool splitArrayAssignment(OpBuilder &builder, hw::ArrayType ty, Value dst,
                            Value src);
  bool splitAssignment(OpBuilder &builder, Value dst, Value src);

  bool anythingChanged;
  LoweringOptions options;

  DenseSet<Operation *> toDelete;
};
} // end anonymous namespace

/// Return true if this is something that will get printed as a unary operator
/// by the Verilog printer.
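/// Roughly: comb.parity prints as `^x`, an XOR against an all-ones constant as
/// `~x`, an equals-all-ones comparison as `&x`, and a not-equals-zero
/// comparison as `|x`.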
static bool isVerilogUnaryOperator(Operation *op) {
  if (isa<comb::ParityOp>(op))
    return true;

  if (auto xorOp = dyn_cast<comb::XorOp>(op))
    return xorOp.isBinaryNot();

  if (auto icmpOp = dyn_cast<comb::ICmpOp>(op))
    return icmpOp.isEqualAllOnes() || icmpOp.isNotEqualZero();

  return false;
}

/// Helper to convert a value to a constant integer if it is one.
static std::optional<APInt> getInt(Value value) {
  if (auto cst = dyn_cast_or_null<hw::ConstantOp>(value.getDefiningOp()))
    return cst.getValue();
  return std::nullopt;
}

// Checks whether the destination and the source of an assignment refer to the
// same location. Note that the destination is a value with an inout type in SV
// form, while the source is built using ops from HWAggregates.
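// For example, the inout destination `sv.struct_field_inout(x, "a")` and the
// source `hw.struct_extract(sv.read_inout(x), "a")` refer to the same field.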
static bool isSelfWrite(Value dst, Value src) {
  if (dst == src)
    return true;

  auto *srcOp = src.getDefiningOp();
  auto *dstOp = dst.getDefiningOp();
  if (!srcOp || !dstOp)
    return false;

  return TypeSwitch<Operation *, bool>(srcOp)
      .Case<hw::StructExtractOp>([&](auto extract) {
        auto toField = dyn_cast<sv::StructFieldInOutOp>(dstOp);
        if (!toField)
          return false;
        if (toField.getFieldAttr() != extract.getFieldNameAttr())
          return false;
        return isSelfWrite(toField.getInput(), extract.getInput());
      })
      .Case<hw::ArrayGetOp>([&](auto get) {
        auto toGet = dyn_cast<sv::ArrayIndexInOutOp>(dstOp);
        if (!toGet || toGet.getIndex().getType() != get.getIndex().getType())
          return false;
        auto toIdx = getInt(toGet.getIndex());
        auto fromIdx = getInt(get.getIndex());
        if (!toIdx || !fromIdx || toIdx != fromIdx)
          return false;
        return isSelfWrite(toGet.getInput(), get.getInput());
      })
      .Case<sv::ReadInOutOp>([&](auto read) { return dst == read.getInput(); })
      .Default([&](auto srcOp) { return false; });
}

/// Split an assignment to a structure into writes of individual fields.
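/// Schematically, storing `hw.struct_inject(s, "a", v)` back to the inout
/// location that `s` was read from becomes a single field update
/// `sv.passign(sv.struct_field_inout(dst, "a"), v)` rather than a whole-struct
/// assignment.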
bool PrettifyVerilogPass::splitStructAssignment(OpBuilder &builder,
                                                hw::StructType ty, Value dst,
                                                Value src) {
  // Follow a chain of injects to find all fields overwritten and separate
  // them into a series of field updates instead of a whole-structure write.
  DenseMap<StringAttr, std::pair<Location, Value>> fields;
  while (auto inj = dyn_cast_or_null<hw::StructInjectOp>(src.getDefiningOp())) {
    // Inner injects are overwritten by outer injects. Since the chain is
    // walked from the outermost inject inwards, try_emplace keeps the value
    // recorded first and does not overwrite it with an inner one.
    auto field = std::make_pair(inj.getLoc(), inj.getNewValue());
    fields.try_emplace(inj.getFieldNameAttr(), field);
    src = inj.getInput();
  }

  // The chain must either terminate at the destination itself (a partial
  // update) or every field must be overwritten (a full update).
  if (!isSelfWrite(dst, src) && ty.getElements().size() != fields.size())
    return false;

  // Emit the field assignments in the order of their definition.
  for (auto &field : ty.getElements()) {
    const auto &name = field.name;

    auto it = fields.find(name);
    if (it == fields.end())
      continue;

    auto &[loc, value] = it->second;
    auto ref = builder.create<sv::StructFieldInOutOp>(loc, dst, name);
    if (!splitAssignment(builder, ref, value))
      builder.create<sv::PAssignOp>(loc, ref, value);
  }
  return true;
}

/// Split an assignment to an array into writes of individual indices.
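/// Schematically, storing a concatenation that only replaces index `i`, e.g.
/// `hw.array_concat(hw.array_slice(a, i + 1), hw.array_create(v),
/// hw.array_slice(a, 0))`, back to the location `a` was read from becomes the
/// single element update `sv.passign(sv.array_index_inout(dst, i), v)`.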
bool PrettifyVerilogPass::splitArrayAssignment(OpBuilder &builder,
                                               hw::ArrayType ty, Value dst,
                                               Value src) {
  // Follow a chain of concat + slice operations that alter a single element.
  if (auto op = dyn_cast_or_null<hw::ArrayCreateOp>(src.getDefiningOp())) {
    // TODO: consider breaking up array assignments into assignments
    // to individual fields.
    auto ty = hw::type_cast<hw::ArrayType>(op.getType());
    if (ty.getNumElements() != 1)
      return false;
    APInt zero(std::max(1u, llvm::Log2_64_Ceil(ty.getNumElements())), 0);

    Value value = op.getInputs()[0];
    auto loc = op.getLoc();
    auto index = builder.create<hw::ConstantOp>(loc, zero);

    auto field = builder.create<sv::ArrayIndexInOutOp>(loc, dst, index);
    if (!splitAssignment(builder, field, value))
      builder.create<sv::PAssignOp>(loc, field, value);
    return true;
  }

  // TODO: generalise to ranges and arbitrary concatenations.
  SmallVector<std::tuple<APInt, Location, Value>> fields;
  while (auto concat =
             dyn_cast_or_null<hw::ArrayConcatOp>(src.getDefiningOp())) {
    auto loc = concat.getLoc();

    // Look for a slice and an element:
    // concat(slice(a, 0, size - 1), elem)
    // concat(elem, slice(a, 1, size - 1))
    if (concat.getNumOperands() == 2) {
      auto c = concat.getInputs();

      auto lhs = dyn_cast_or_null<hw::ArraySliceOp>(c[1].getDefiningOp());
      auto rhs = dyn_cast_or_null<hw::ArraySliceOp>(c[0].getDefiningOp());
      auto midL = dyn_cast_or_null<hw::ArrayCreateOp>(c[1].getDefiningOp());
      auto midR = dyn_cast_or_null<hw::ArrayCreateOp>(c[0].getDefiningOp());

      auto size =
          hw::type_cast<hw::ArrayType>(concat.getType()).getNumElements();
      if (lhs && midR) {
        auto baseIdx = getInt(lhs.getLowIndex());
        if (!baseIdx || *baseIdx != 0 || midR.getInputs().size() != 1)
          break;
        fields.emplace_back(APInt(baseIdx->getBitWidth(), size - 1), loc,
                            midR.getInputs()[0]);
        src = lhs.getInput();
        continue;
      }
      if (rhs && midL) {
        auto baseIdx = getInt(rhs.getLowIndex());
        if (!baseIdx || *baseIdx != 1 || midL.getInputs().size() != 1)
          break;
        src = rhs.getInput();
        fields.emplace_back(APInt(baseIdx->getBitWidth(), 0), loc,
                            midL.getInputs()[0]);
        continue;
      }
      break;
    }

    // Look for a pattern overwriting a single element of the array.
    // concat(slice(a, 0, n - 1), create(get(a, n)), slice(n + 1, size - n))
    if (concat.getNumOperands() == 3) {
      auto c = concat.getInputs();
      auto rhs = dyn_cast_or_null<hw::ArraySliceOp>(c[0].getDefiningOp());
      auto mid = dyn_cast_or_null<hw::ArrayCreateOp>(c[1].getDefiningOp());
      auto lhs = dyn_cast_or_null<hw::ArraySliceOp>(c[2].getDefiningOp());
      if (!lhs || !mid || !rhs || mid.getInputs().size() != 1)
        break;
      auto elem = mid.getInputs()[0];
      auto arr = lhs.getInput();
      if (arr != rhs.getInput() || arr.getType() != concat.getType())
        break;

      auto lhsSize =
          hw::type_cast<hw::ArrayType>(lhs.getType()).getNumElements();
      auto lhsIdx = getInt(lhs.getLowIndex());
      auto rhsIdx = getInt(rhs.getLowIndex());
      if (!lhsIdx || *lhsIdx != 0)
        break;
      if (!rhsIdx || *rhsIdx != lhsSize + 1)
        break;
      fields.emplace_back(*rhsIdx - 1, loc, elem);
      src = arr;
      continue;
    }
    break;
  }

  if (!isSelfWrite(dst, src))
    return false;

  // Emit the assignments in the order of the indices.
  std::stable_sort(fields.begin(), fields.end(), [](auto l, auto r) {
    return std::get<0>(l).ult(std::get<0>(r));
  });

  std::optional<APInt> last;
  for (auto &[i, loc, value] : fields) {
    if (i == last)
      continue;
    auto index = builder.create<hw::ConstantOp>(loc, i);
    auto field = builder.create<sv::ArrayIndexInOutOp>(loc, dst, index);
    if (!splitAssignment(builder, field, value))
      builder.create<sv::PAssignOp>(loc, field, value);
    last = i;
  }
  return true;
}

/// Instead of emitting a struct_inject to alter fields or concatenation
/// to adjust array elements, emit a more readable sequence of disjoint
/// assignments to individual fields and indices. Returns true if
/// sub-assignments were emitted and the original one can be deleted.
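/// Splitting recurses through splitStructAssignment and splitArrayAssignment,
/// so nested aggregates decompose all the way down to scalar assignments such
/// as (roughly) `dst.field[2] <= v;` in the emitted Verilog.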
bool PrettifyVerilogPass::splitAssignment(OpBuilder &builder, Value dst,
                                          Value src) {
  if (isSelfWrite(dst, src))
    return true;

  if (auto ty = hw::type_dyn_cast<hw::StructType>(src.getType()))
    return splitStructAssignment(builder, ty, dst, src);

  if (auto ty = hw::type_dyn_cast<hw::ArrayType>(src.getType()))
    return splitArrayAssignment(builder, ty, dst, src);

  return false;
}

/// Sink an operation into the same block where it is used. This will clone the
/// operation so it can be sunk into multiple blocks. If there are no more uses
/// in the current block, the op will be removed.
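/// This is used below for constants and for "free" inout projections such as
/// sv.read_inout and sv.array_index_inout, so the emitter can print them
/// inline at each use instead of introducing a temporary.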
void PrettifyVerilogPass::sinkOrCloneOpToUses(Operation *op) {
  assert(mlir::isMemoryEffectFree(op) &&
         "Op with side effects cannot be sunk to its uses.");
  auto block = op->getBlock();
  // This maps a block to the block-local instance of the op.
  SmallDenseMap<Block *, Value, 8> blockLocalValues;
  for (auto &use : llvm::make_early_inc_range(op->getUses())) {
    // If the current use is in the same block as the operation, there is
    // nothing to do.
    auto localBlock = use.getOwner()->getBlock();
    if (block == localBlock)
      continue;
    // Find the block-local clone of the operation. If there is not one
    // already, the op will be cloned into the block.
    auto &localValue = blockLocalValues[localBlock];
    if (!localValue) {
      // Clone the operation and insert it at the beginning of the block.
      localValue = OpBuilder::atBlockBegin(localBlock).clone(*op)->getResult(0);
    }
    // Replace the current use, removing it from the use list.
    use.set(localValue);
    anythingChanged = true;
  }
  // If this op is no longer used, drop it.
  if (op->use_empty()) {
    toDelete.insert(op);
    anythingChanged = true;
  }
}

/// This is called on unary operators. This returns true if the operator is
/// moved.
bool PrettifyVerilogPass::prettifyUnaryOperator(Operation *op) {
  // If this is a multiple-use unary operator, duplicate it and move it into
  // the block corresponding to the user. This avoids emitting a temporary just
  // for a unary operator. Instead of:
  //
  //    tmp1 = ^(thing+thing);
  //         = tmp1 + 42
  //
  // we get:
  //
  //    tmp2 = thing+thing;
  //         = ^tmp2 + 42
  //
  // This is particularly helpful when the operand of the unary op has multiple
  // uses as well.
  if (op->use_empty() || op->hasOneUse())
    return false;

  // If this operation has any users that cannot inline the operation, then
  // don't duplicate any of them.
  for (auto *user : op->getUsers()) {
    if (isa<comb::ExtractOp, hw::ArraySliceOp>(user))
      return false;
    // TODO: We should use the condition used in ExportVerilog regarding
    // allowExprInEventControl.
    if (!options.allowExprInEventControl &&
        isa<sv::AlwaysFFOp, sv::AlwaysOp, sv::AssertConcurrentOp,
            sv::AssumeConcurrentOp, sv::CoverConcurrentOp, sv::AssertPropertyOp,
            sv::AssumePropertyOp, sv::CoverPropertyOp>(user))
      return false;
  }

  // Duplicating unary operations can move them across blocks (down the region
  // tree). Make sure to keep referenced constants local.
  auto cloneConstantOperandsIfNeeded = [&](Operation *op) {
    for (auto &operand : op->getOpOperands()) {
      auto constant = operand.get().getDefiningOp<hw::ConstantOp>();
      if (!constant)
        continue;

      // If the constant is in a different block, clone or move it into the
      // block.
      if (constant->getBlock() != op->getBlock())
        operand.set(OpBuilder(op).clone(*constant)->getResult(0));
    }
  };

  while (!op->hasOneUse()) {
    OpOperand &use = *op->use_begin();
    Operation *user = use.getOwner();

    // Clone the operation and insert before this user.
    auto *cloned = OpBuilder(user).clone(*op);
    cloneConstantOperandsIfNeeded(cloned);

    // Update the user's operand to the new value.
    use.set(cloned->getResult(0));
  }

  // There is exactly one user left, so move this before it.
  Operation *user = *op->user_begin();
  op->moveBefore(user);
  cloneConstantOperandsIfNeeded(op);

  anythingChanged = true;
  return true;
}

// Return the depth of the specified block in the region tree, stopping at
// 'topBlock'.
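// For example, a block nested in an sv.if that is itself nested in an
// sv.always region of 'topBlock' has depth 2, while 'topBlock' itself has
// depth 0.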
static unsigned getBlockDepth(Block *block, Block *topBlock) {
  unsigned result = 0;
  while (block != topBlock) {
    block = block->getParentOp()->getBlock();
    ++result;
  }
  return result;
}

/// This method is called on expressions to see if we can sink them down the
/// region tree. This is a good thing to do to reduce the scope of the
/// expression.
///
/// This is called on expressions that may have side effects, and filters out
/// the side-effecting cases late for efficiency.
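/// For example, an expression whose only users are inside the body of a single
/// sv.always block is moved into that block, scoping any temporary it needs to
/// that block rather than to the whole module.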
void PrettifyVerilogPass::sinkExpression(Operation *op) {
  // Ignore expressions with no users.
  if (op->use_empty())
    return;

  Block *curOpBlock = op->getBlock();

  // Single-use expressions are the most common and simple to handle.
  if (op->hasOneUse()) {
    if (curOpBlock != op->user_begin()->getBlock()) {
      // Ok, we're about to make a change, ensure that there are no side
      // effects.
      if (!mlir::isMemoryEffectFree(op))
        return;

      op->moveBefore(*op->user_begin());
      anythingChanged = true;
    }
    return;
  }

  // Find the nearest common ancestor of all the users.
  auto userIt = op->user_begin();
  Block *ncaBlock = userIt->getBlock();
  ++userIt;
  unsigned ncaBlockDepth = getBlockDepth(ncaBlock, curOpBlock);
  if (ncaBlockDepth == 0)
    return; // Have a user in the current block.

  for (auto e = op->user_end(); userIt != e; ++userIt) {
    auto *userBlock = userIt->getBlock();
    if (userBlock == curOpBlock)
      return; // Op has a user in its own block, can't sink it.
    if (userBlock == ncaBlock)
      continue;

    // Get the region depth of the user block so we can march up the region
    // tree to a common ancestor.
    unsigned userBlockDepth = getBlockDepth(userBlock, curOpBlock);
    while (userBlock != ncaBlock) {
      if (ncaBlockDepth < userBlockDepth) {
        userBlock = userBlock->getParentOp()->getBlock();
        --userBlockDepth;
      } else if (userBlockDepth < ncaBlockDepth) {
        ncaBlock = ncaBlock->getParentOp()->getBlock();
        --ncaBlockDepth;
      } else {
        userBlock = userBlock->getParentOp()->getBlock();
        --userBlockDepth;
        ncaBlock = ncaBlock->getParentOp()->getBlock();
        --ncaBlockDepth;
      }
    }

    if (ncaBlockDepth == 0)
      return; // Have a user in the current block.
  }

  // Ok, we're about to make a change, ensure that there are no side
  // effects.
  if (!mlir::isMemoryEffectFree(op))
    return;

  // Ok, we found a common ancestor between all the users that is deeper than
  // the current op. Sink it into the start of that block.
  assert(ncaBlock != curOpBlock && "should have bailed out earlier");
  op->moveBefore(&ncaBlock->front());
  anythingChanged = true;
}

void PrettifyVerilogPass::processPostOrder(Block &body) {
  SmallVector<Operation *> instances;

  // Walk the block bottom-up, processing the region tree inside out.
  for (auto &op :
       llvm::make_early_inc_range(llvm::reverse(body.getOperations()))) {
    if (op.getNumRegions()) {
      for (auto &region : op.getRegions())
        for (auto &regionBlock : region.getBlocks())
          processPostOrder(regionBlock);
    }

    // Simplify assignments involving structures and arrays.
    if (auto assign = dyn_cast<sv::PAssignOp>(op)) {
      auto dst = assign.getDest();
      auto src = assign.getSrc();
      if (!isSelfWrite(dst, src)) {
        OpBuilder builder(assign);
        if (splitAssignment(builder, dst, src)) {
          anythingChanged = true;
          toDelete.insert(src.getDefiningOp());
          toDelete.insert(dst.getDefiningOp());
          assign.erase();
          continue;
        }
      }
    }

    // Sink and duplicate unary operators.
    if (isVerilogUnaryOperator(&op) && prettifyUnaryOperator(&op))
      continue;

    // Sink or duplicate constant ops and invisible "free" ops into the same
    // block as their use. This allows the Verilog emitter to inline constant
    // expressions and keeps a ReadInOutOp from preventing motion.
    if (matchPattern(&op, mlir::m_Constant()) ||
        isa<sv::ReadInOutOp, sv::ArrayIndexInOutOp, sv::StructFieldInOutOp,
            sv::IndexedPartSelectInOutOp, hw::ParamValueOp>(op)) {
      sinkOrCloneOpToUses(&op);
      continue;
    }

    // Sink normal expressions down the region tree if they aren't used within
    // their current block. This allows them to be folded into the using
    // expression inline in the best case, and better scopes the temporary wire
    // they generate in the worst case. Our overall traversal order is
    // post-order here which means all users will already be sunk.
    if (hw::isCombinational(&op) || sv::isExpression(&op)) {
      sinkExpression(&op);
      continue;
    }

    if (isa<hw::InstanceOp>(op))
      instances.push_back(&op);
  }

  // If we have any instances, keep their relative order but shift them to the
  // end of the module. Any outputs will be writing to a wire or an output port
  // of the enclosing module anyway, and this allows inputs to be inlined into
  // the operand list as parameters.
  if (!instances.empty()) {
    for (Operation *instance : llvm::reverse(instances)) {
      if (instance != &body.back())
        instance->moveBefore(&body.back());
    }
  }
}

void PrettifyVerilogPass::runOnOperation() {
  hw::HWModuleOp thisModule = getOperation();
  options = LoweringOptions(thisModule->getParentOfType<mlir::ModuleOp>());

  // Keeps track of whether anything changed during this pass, used to
  // determine if the analyses were preserved.
  anythingChanged = false;

  // Walk the operations in post-order, transforming any that are interesting.
  processPostOrder(*thisModule.getBodyBlock());

  // Erase any dangling operands of simplified operations.
  while (!toDelete.empty()) {
    auto it = toDelete.begin();
    Operation *op = *it;
    toDelete.erase(it);

    if (!op || !isOpTriviallyDead(op))
      continue;

    for (auto operand : op->getOperands())
      toDelete.insert(operand.getDefiningOp());

    op->erase();
  }

  // If we did not change anything in the graph, mark all analyses as
  // preserved.
  if (!anythingChanged)
    markAllAnalysesPreserved();
}

std::unique_ptr<mlir::Pass> circt::sv::createPrettifyVerilogPass() {
  return std::make_unique<PrettifyVerilogPass>();
}