CIRCT 22.0.0git
CombFolds.cpp
1//===- CombFolds.cpp - Folds + Canonicalization for Comb operations -------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
13#include "mlir/IR/Diagnostics.h"
14#include "mlir/IR/Matchers.h"
15#include "mlir/IR/PatternMatch.h"
16#include "llvm/ADT/SetVector.h"
17#include "llvm/ADT/SmallBitVector.h"
18#include "llvm/ADT/TypeSwitch.h"
19#include "llvm/Support/KnownBits.h"
20
21using namespace mlir;
22using namespace circt;
23using namespace comb;
24using namespace matchers;
25
26// Returns true if the op has one of its own results as an operand.
27static bool isOpTriviallyRecursive(Operation *op) {
28 return llvm::any_of(op->getOperands(), [op](auto operand) {
29 return operand.getDefiningOp() == op;
30 });
31}
32
33/// Create a new instance of a generic operation that only has value operands,
34/// and has a single result value whose type matches the first operand.
35///
36/// This should not be used to create instances of ops with attributes or with
37/// more complicated type signatures.
38static Value createGenericOp(Location loc, OperationName name,
39 ArrayRef<Value> operands, OpBuilder &builder) {
40 OperationState state(loc, name);
41 state.addOperands(operands);
42 state.addTypes(operands[0].getType());
43 return builder.create(state)->getResult(0);
44}
45
46static TypedAttr getIntAttr(const APInt &value, MLIRContext *context) {
47 return IntegerAttr::get(IntegerType::get(context, value.getBitWidth()),
48 value);
49}
50
51/// Flatten concat and replicate operands into a vector.
52static void getConcatOperands(Value v, SmallVectorImpl<Value> &result) {
53 if (auto concat = v.getDefiningOp<ConcatOp>()) {
54 for (auto op : concat.getOperands())
55 getConcatOperands(op, result);
56 } else if (auto repl = v.getDefiningOp<ReplicateOp>()) {
57 for (size_t i = 0, e = repl.getMultiple(); i != e; ++i)
58 getConcatOperands(repl.getOperand(), result);
59 } else {
60 result.push_back(v);
61 }
62}
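// Illustrative sketch (operand names are hypothetical): for a value defined as
//   concat(a, replicate(b, 2), concat(c, d))
// the helper above appends [a, b, b, c, d] to `result`, flattening nested
// concats and expanding replicates into repeated operands.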
63
64// Return true if the op has SV attributes. Note that we cannot use a helper
65 // function `hasSVAttributes` defined under the SV dialect because of a cyclic
66// dependency.
67static bool hasSVAttributes(Operation *op) {
68 return op->hasAttr("sv.attributes");
69}
70
71namespace {
72template <typename SubType>
73struct ComplementMatcher {
74 SubType lhs;
75 ComplementMatcher(SubType lhs) : lhs(std::move(lhs)) {}
76 bool match(Operation *op) {
77 auto xorOp = dyn_cast<XorOp>(op);
78 return xorOp && xorOp.isBinaryNot() && lhs.match(op->getOperand(0));
79 }
80};
81} // end anonymous namespace
82
83template <typename SubType>
84static inline ComplementMatcher<SubType> m_Complement(const SubType &subExpr) {
85 return ComplementMatcher<SubType>(subExpr);
86}
87
88/// Return true if the op will be flattened afterwards. An op will be flattened if
89/// its sole user is the same kind of op with a matching "twoState" attribute in the same block.
90static bool shouldBeFlattened(Operation *op) {
91 assert((isa<AndOp, OrOp, XorOp, AddOp, MulOp>(op) &&
92 "must be commutative operations"));
93 if (op->hasOneUse()) {
94 auto *user = *op->getUsers().begin();
95 return user->getName() == op->getName() &&
96 op->getAttrOfType<UnitAttr>("twoState") ==
97 user->getAttrOfType<UnitAttr>("twoState") &&
98 op->getBlock() == user->getBlock();
99 }
100 return false;
101}
102
103/// Flattens operands of `op` that are defined by the same kind of operation,
104/// folding their operands directly into `op`. Returns true if successful, and false otherwise.
105///
106/// Example: op(1, 2, op(3, 4), 5) -> op(1, 2, 3, 4, 5) // returns true
107///
108static bool tryFlatteningOperands(Operation *op, PatternRewriter &rewriter) {
109 // Skip if the operation should be flattened by another operation.
110 if (shouldBeFlattened(op))
111 return false;
112
113 auto inputs = op->getOperands();
114
115 SmallVector<Value, 4> newOperands;
116 SmallVector<Location, 4> newLocations{op->getLoc()};
117 newOperands.reserve(inputs.size());
118 struct Element {
119 decltype(inputs.begin()) current, end;
120 };
121
122 SmallVector<Element> worklist;
123 worklist.push_back({inputs.begin(), inputs.end()});
124 bool binFlag = op->hasAttrOfType<UnitAttr>("twoState");
125 bool changed = false;
126 while (!worklist.empty()) {
127 auto &element = worklist.back(); // Do not pop. Take ref.
128
129 // Pop when we finished traversing the current operand range.
130 if (element.current == element.end) {
131 worklist.pop_back();
132 continue;
133 }
134
135 Value value = *element.current++;
136 auto *flattenOp = value.getDefiningOp();
137 // If not defined by a compatible operation of the same kind and
138 // from the same block, keep this as-is.
139 if (!flattenOp || flattenOp->getName() != op->getName() ||
140 flattenOp == op || binFlag != flattenOp->hasAttrOfType<UnitAttr>("twoState") ||
141 flattenOp->getBlock() != op->getBlock()) {
142 newOperands.push_back(value);
143 continue;
144 }
145
146 // Don't duplicate logic when it has multiple uses.
147 if (!value.hasOneUse()) {
148 // We can fold a multi-use binary operation into this one if this allows a
149 // constant to fold through. For example, fold
150 // (or a, b, c, (or d, cst1), cst2) --> (or a, b, c, d, cst1, cst2)
151 // since the constants will both fold and we end up with equivalent cost.
152 //
153 // We don't do this for add/mul because the hardware won't be shared
154 // between the two ops if duplicated.
155 if (flattenOp->getNumOperands() != 2 || !isa<AndOp, OrOp, XorOp>(op) ||
156 !flattenOp->getOperand(1).getDefiningOp<hw::ConstantOp>() ||
157 !inputs.back().getDefiningOp<hw::ConstantOp>()) {
158 newOperands.push_back(value);
159 continue;
160 }
161 }
162
163 changed = true;
164
165 // Otherwise, push operands into worklist.
166 auto flattenOpInputs = flattenOp->getOperands();
167 worklist.push_back({flattenOpInputs.begin(), flattenOpInputs.end()});
168 newLocations.push_back(flattenOp->getLoc());
169 }
170
171 if (!changed)
172 return false;
173
174 Value result = createGenericOp(FusedLoc::get(op->getContext(), newLocations),
175 op->getName(), newOperands, rewriter);
176 if (binFlag)
177 result.getDefiningOp()->setAttr("twoState", rewriter.getUnitAttr());
178
179 replaceOpAndCopyNamehint(rewriter, op, result);
180 return true;
181}
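// Worked example (illustrative operand names), assuming the nested ops are
// single-use and live in the same block:
//   or(a, or(b, c), d)        --> or(a, b, c, d)
// and, for the multi-use constant case described above,
//   or(a, or(d, cst1), cst2)  --> or(a, d, cst1, cst2)
// where the two trailing constants can then be folded together by a later
// canonicalization.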
182
183// Given a range of uses of an operation, find the lowest and highest bits
184// inclusive that are ever referenced. The range of uses must not be empty.
185static std::pair<size_t, size_t>
186getLowestBitAndHighestBitRequired(Operation *op, bool narrowTrailingBits,
187 size_t originalOpWidth) {
188 auto users = op->getUsers();
189 assert(!users.empty() &&
190 "getLowestBitAndHighestBitRequired cannot operate on "
191 "a empty list of uses.");
192
193 // When we don't want to narrow trailing bits (namely for arithmetic
194 // operations), force lowestBitRequired = 0.
195 size_t lowestBitRequired = narrowTrailingBits ? originalOpWidth - 1 : 0;
196 size_t highestBitRequired = 0;
197
198 for (auto *user : users) {
199 if (auto extractOp = dyn_cast<ExtractOp>(user)) {
200 size_t lowBit = extractOp.getLowBit();
201 size_t highBit =
202 cast<IntegerType>(extractOp.getType()).getWidth() + lowBit - 1;
203 highestBitRequired = std::max(highestBitRequired, highBit);
204 lowestBitRequired = std::min(lowestBitRequired, lowBit);
205 continue;
206 }
207
208 highestBitRequired = originalOpWidth - 1;
209 lowestBitRequired = 0;
210 break;
211 }
212
213 return {lowestBitRequired, highestBitRequired};
214}
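// Illustrative example: if the only users of an 8-bit op are
// extract(lowBit=2) : i3 and extract(lowBit=1) : i2, the required range is
// {1, 4}; with narrowTrailingBits == false the lower bound is forced to 0,
// giving {0, 4}. Any non-extract user widens the range to {0, 7}.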
215
216template <class OpTy>
217static bool narrowOperationWidth(OpTy op, bool narrowTrailingBits,
218 PatternRewriter &rewriter) {
219 IntegerType opType = dyn_cast<IntegerType>(op.getResult().getType());
220 if (!opType)
221 return false;
222
223 auto range = getLowestBitAndHighestBitRequired(op, narrowTrailingBits,
224 opType.getWidth());
225 if (range.second + 1 == opType.getWidth() && range.first == 0)
226 return false;
227
228 SmallVector<Value> args;
229 auto newType = rewriter.getIntegerType(range.second - range.first + 1);
230 for (auto inop : op.getOperands()) {
231 // Operands whose type differs from the result (e.g. a mux condition) are kept as-is.
232 if (inop.getType() != op.getType())
233 args.push_back(inop);
234 else
235 args.push_back(rewriter.createOrFold<ExtractOp>(inop.getLoc(), newType,
236 inop, range.first));
237 }
238 auto newop = OpTy::create(rewriter, op.getLoc(), newType, args);
239 newop->setDialectAttrs(op->getDialectAttrs());
240 if (op.getTwoState())
241 newop.setTwoState(true);
242
243 Value newResult = newop.getResult();
244 if (range.first)
245 newResult = rewriter.createOrFold<ConcatOp>(
246 op.getLoc(), newResult,
247 hw::ConstantOp::create(rewriter, op.getLoc(),
248 APInt::getZero(range.first)));
249 if (range.second + 1 < opType.getWidth())
250 newResult = rewriter.createOrFold<ConcatOp>(
251 op.getLoc(),
252 hw::ConstantOp::create(
253 rewriter, op.getLoc(),
254 APInt::getZero(opType.getWidth() - range.second - 1)),
255 newResult);
256 rewriter.replaceOp(op, newResult);
257 return true;
258}
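// Illustrative example: for an 8-bit `and` whose only users extract bits
// [4:2], the op is rebuilt as a 3-bit `and` over extract(operand, lowBit=2)
// and the result is padded back to 8 bits as
//   concat(zeros(3), narrowed_and, zeros(2))
// so the existing extracts keep reading the same bit positions.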
259
260//===----------------------------------------------------------------------===//
261// Unary Operations
262//===----------------------------------------------------------------------===//
263
264OpFoldResult ReplicateOp::fold(FoldAdaptor adaptor) {
265 if (isOpTriviallyRecursive(*this))
266 return {};
267
268 // Replicate one time -> noop.
269 if (cast<IntegerType>(getType()).getWidth() ==
270 getInput().getType().getIntOrFloatBitWidth())
271 return getInput();
272
273 // Constant fold.
274 if (auto input = dyn_cast_or_null<IntegerAttr>(adaptor.getInput())) {
275 if (input.getValue().getBitWidth() == 1) {
276 if (input.getValue().isZero())
277 return getIntAttr(
278 APInt::getZero(cast<IntegerType>(getType()).getWidth()),
279 getContext());
280 return getIntAttr(
281 APInt::getAllOnes(cast<IntegerType>(getType()).getWidth()),
282 getContext());
283 }
284
285 APInt result = APInt::getZeroWidth();
286 for (auto i = getMultiple(); i != 0; --i)
287 result = result.concat(input.getValue());
288 return getIntAttr(result, getContext());
289 }
290
291 return {};
292}
293
294OpFoldResult ParityOp::fold(FoldAdaptor adaptor) {
295 if (isOpTriviallyRecursive(*this))
296 return {};
297
298 // Constant fold.
299 if (auto input = dyn_cast_or_null<IntegerAttr>(adaptor.getInput()))
300 return getIntAttr(APInt(1, input.getValue().popcount() & 1), getContext());
301
302 return {};
303}
304
305//===----------------------------------------------------------------------===//
306// Binary Operations
307//===----------------------------------------------------------------------===//
308
309/// Performs constant folding of the binary operation `paramOpcode` on the two
310/// attributes in `operands` and returns the result if possible.
311static Attribute constFoldBinaryOp(ArrayRef<Attribute> operands,
312 hw::PEO paramOpcode) {
313 assert(operands.size() == 2 && "binary op takes two operands");
314 if (!operands[0] || !operands[1])
315 return {};
316
317 // Fold constants with ParamExprAttr::get which handles simple constants as
318 // well as parameter expressions.
319 return hw::ParamExprAttr::get(paramOpcode, cast<TypedAttr>(operands[0]),
320 cast<TypedAttr>(operands[1]));
321}
322
323OpFoldResult ShlOp::fold(FoldAdaptor adaptor) {
324 if (isOpTriviallyRecursive(*this))
325 return {};
326
327 if (auto rhs = dyn_cast_or_null<IntegerAttr>(adaptor.getRhs())) {
328 if (rhs.getValue().isZero())
329 return getOperand(0);
330
331 unsigned width = getType().getIntOrFloatBitWidth();
332 if (rhs.getValue().uge(width))
333 return getIntAttr(APInt::getZero(width), getContext());
334 }
335 return constFoldBinaryOp(adaptor.getOperands(), hw::PEO::Shl);
336}
337
338LogicalResult ShlOp::canonicalize(ShlOp op, PatternRewriter &rewriter) {
339 if (isOpTriviallyRecursive(op))
340 return failure();
341
342 // ShlOp(x, cst) -> Concat(Extract(x), zeros)
343 APInt value;
344 if (!matchPattern(op.getRhs(), m_ConstantInt(&value)))
345 return failure();
346
347 unsigned width = cast<IntegerType>(op.getLhs().getType()).getWidth();
348 if (value.ugt(width))
349 value = width;
350 unsigned shift = value.getZExtValue();
351
352 // This case is handled by fold.
353 if (width <= shift || shift == 0)
354 return failure();
355
356 auto zeros =
357 hw::ConstantOp::create(rewriter, op.getLoc(), APInt::getZero(shift));
358
359 // Remove the high bits which would be removed by the Shl.
360 auto extract =
361 ExtractOp::create(rewriter, op.getLoc(), op.getLhs(), 0, width - shift);
362
363 replaceOpWithNewOpAndCopyNamehint<ConcatOp>(rewriter, op, extract, zeros);
364 return success();
365}
366
367OpFoldResult ShrUOp::fold(FoldAdaptor adaptor) {
368 if (isOpTriviallyRecursive(*this))
369 return {};
370
371 if (auto rhs = dyn_cast_or_null<IntegerAttr>(adaptor.getRhs())) {
372 if (rhs.getValue().isZero())
373 return getOperand(0);
374
375 unsigned width = getType().getIntOrFloatBitWidth();
376 if (rhs.getValue().uge(width))
377 return getIntAttr(APInt::getZero(width), getContext());
378 }
379 return constFoldBinaryOp(adaptor.getOperands(), hw::PEO::ShrU);
380}
381
382LogicalResult ShrUOp::canonicalize(ShrUOp op, PatternRewriter &rewriter) {
383 if (isOpTriviallyRecursive(op))
384 return failure();
385
386 // ShrUOp(x, cst) -> Concat(zeros, Extract(x))
387 APInt value;
388 if (!matchPattern(op.getRhs(), m_ConstantInt(&value)))
389 return failure();
390
391 unsigned width = cast<IntegerType>(op.getLhs().getType()).getWidth();
392 if (value.ugt(width))
393 value = width;
394 unsigned shift = value.getZExtValue();
395
396 // This case is handled by fold.
397 if (width <= shift || shift == 0)
398 return failure();
399
400 auto zeros =
401 hw::ConstantOp::create(rewriter, op.getLoc(), APInt::getZero(shift));
402
403 // Remove the low bits which would be removed by the Shr.
404 auto extract = ExtractOp::create(rewriter, op.getLoc(), op.getLhs(), shift,
405 width - shift);
406
407 replaceOpWithNewOpAndCopyNamehint<ConcatOp>(rewriter, op, zeros, extract);
408 return success();
409}
410
411OpFoldResult ShrSOp::fold(FoldAdaptor adaptor) {
412 if (isOpTriviallyRecursive(*this))
413 return {};
414
415 if (auto rhs = dyn_cast_or_null<IntegerAttr>(adaptor.getRhs()))
416 if (rhs.getValue().isZero())
417 return getOperand(0);
418 return constFoldBinaryOp(adaptor.getOperands(), hw::PEO::ShrS);
419}
420
421LogicalResult ShrSOp::canonicalize(ShrSOp op, PatternRewriter &rewriter) {
422 if (isOpTriviallyRecursive(op))
423 return failure();
424
425 // ShrSOp(x, cst) -> Concat(replicate(extract(x, topbit)), extract(x))
426 APInt value;
427 if (!matchPattern(op.getRhs(), m_ConstantInt(&value)))
428 return failure();
429
430 unsigned width = cast<IntegerType>(op.getLhs().getType()).getWidth();
431 if (value.ugt(width))
432 value = width;
433 unsigned shift = value.getZExtValue();
434
435 auto topbit =
436 rewriter.createOrFold<ExtractOp>(op.getLoc(), op.getLhs(), width - 1, 1);
437 auto sext = rewriter.createOrFold<ReplicateOp>(op.getLoc(), topbit, shift);
438
439 if (width == shift) {
440 replaceOpAndCopyNamehint(rewriter, op, {sext});
441 return success();
442 }
443
444 auto extract = ExtractOp::create(rewriter, op.getLoc(), op.getLhs(), shift,
445 width - shift);
446
447 replaceOpWithNewOpAndCopyNamehint<ConcatOp>(rewriter, op, sext, extract);
448 return success();
449}
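// Worked example (illustrative): for x : i8, shrs(x, 3) becomes
//   concat(replicate(extract(x, lowBit=7) : i1, 3), extract(x, lowBit=3) : i5)
// i.e. the sign bit fills the vacated high bits. When the shift equals the
// width, the result is just the replicated sign bit.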
450
451//===----------------------------------------------------------------------===//
452// Other Operations
453//===----------------------------------------------------------------------===//
454
455OpFoldResult ExtractOp::fold(FoldAdaptor adaptor) {
456 if (isOpTriviallyRecursive(*this))
457 return {};
458
459 // If we are extracting the entire input, then return it.
460 if (getInput().getType() == getType())
461 return getInput();
462
463 // Constant fold.
464 if (auto input = dyn_cast_or_null<IntegerAttr>(adaptor.getInput())) {
465 unsigned dstWidth = cast<IntegerType>(getType()).getWidth();
466 return getIntAttr(input.getValue().lshr(getLowBit()).trunc(dstWidth),
467 getContext());
468 }
469 return {};
470}
471
472// Transforms extract(lo, cat(a, b, c, d, e)) into
473// cat(extract(lo1, b), c, extract(lo2, d)).
474// innerCat must be the argument of the provided ExtractOp.
475 static LogicalResult extractConcatToConcatExtract(ExtractOp op,
476 ConcatOp innerCat,
477 PatternRewriter &rewriter) {
478 auto reversedConcatArgs = llvm::reverse(innerCat.getInputs());
479 size_t beginOfFirstRelevantElement = 0;
480 auto it = reversedConcatArgs.begin();
481 size_t lowBit = op.getLowBit();
482
483 // This loop finds the first concatArg that is covered by the ExtractOp
484 for (; it != reversedConcatArgs.end(); it++) {
485 assert(beginOfFirstRelevantElement <= lowBit &&
486 "incorrectly moved past an element that lowBit has coverage over");
487 auto operand = *it;
488
489 size_t operandWidth = operand.getType().getIntOrFloatBitWidth();
490 if (lowBit < beginOfFirstRelevantElement + operandWidth) {
491 // A bit other than the first bit will be used in this element.
492 // ...... ........ ...
493 // ^---lowBit
494 // ^---beginOfFirstRelevantElement
495 //
496 // Edge-case close to the end of the range.
497 // ...... ........ ...
498 // ^---(position + operandWidth)
499 // ^---lowBit
500 // ^---beginOfFirstRelevantElement
501 //
502 // Edge-case close to the beginning of the range.
503 // ...... ........ ...
504 // ^---lowBit
505 // ^---beginOfFirstRelevantElement
506 //
507 break;
508 }
509
510 // extraction discards this element.
511 // ...... ........ ...
512 // | ^---lowBit
513 // ^---beginOfFirstRelevantElement
514 beginOfFirstRelevantElement += operandWidth;
515 }
516 assert(it != reversedConcatArgs.end() &&
517 "incorrectly failed to find an element which contains coverage of "
518 "lowBit");
519
520 SmallVector<Value> reverseConcatArgs;
521 size_t widthRemaining = cast<IntegerType>(op.getType()).getWidth();
522 size_t extractLo = lowBit - beginOfFirstRelevantElement;
523
524 // Transform individual arguments of innerCat(..., a, b, c) into
525 // [ extract(a), b, extract(c) ], skipping an extract operation where
526 // possible.
527 for (; widthRemaining != 0 && it != reversedConcatArgs.end(); it++) {
528 auto concatArg = *it;
529 size_t operandWidth = concatArg.getType().getIntOrFloatBitWidth();
530 size_t widthToConsume = std::min(widthRemaining, operandWidth - extractLo);
531
532 if (widthToConsume == operandWidth && extractLo == 0) {
533 reverseConcatArgs.push_back(concatArg);
534 } else {
535 auto resultType = IntegerType::get(rewriter.getContext(), widthToConsume);
536 reverseConcatArgs.push_back(
537 ExtractOp::create(rewriter, op.getLoc(), resultType, *it, extractLo));
538 }
539
540 widthRemaining -= widthToConsume;
541
542 // Beyond the first element, all elements are extracted from position 0.
543 extractLo = 0;
544 }
545
546 if (reverseConcatArgs.size() == 1) {
547 replaceOpAndCopyNamehint(rewriter, op, reverseConcatArgs[0]);
548 } else {
549 replaceOpWithNewOpAndCopyNamehint<ConcatOp>(
550 rewriter, op, SmallVector<Value>(llvm::reverse(reverseConcatArgs)));
551 }
552 return success();
553}
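// Worked example (illustrative widths): for
//   extract(lowBit=3) : i6 of concat(a : i4, b : i4, c : i4)
// bit 3 falls inside c (the least significant operand), so the rewrite yields
//   concat(extract(a, lowBit=0) : i1, b, extract(c, lowBit=3) : i1)
// taking one bit of c, all of b, and the low bit of a, 6 bits in total.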
554
555 // Transforms extract(lo, replicate(a, N)) into a smaller replicate or an extract of `a`.
556static bool extractFromReplicate(ExtractOp op, ReplicateOp replicate,
557 PatternRewriter &rewriter) {
558 auto extractResultWidth = cast<IntegerType>(op.getType()).getWidth();
559 auto replicateEltWidth =
560 replicate.getOperand().getType().getIntOrFloatBitWidth();
561
562 // If the extract starts at the base of an element and is an even multiple,
563 // we can replace the extract with a smaller replicate.
564 if (op.getLowBit() % replicateEltWidth == 0 &&
565 extractResultWidth % replicateEltWidth == 0) {
566 replaceOpWithNewOpAndCopyNamehint<ReplicateOp>(rewriter, op, op.getType(),
567 replicate.getOperand());
568 return true;
569 }
570
571 // If the extract is completely contained in one element, extract from the
572 // element.
573 if (op.getLowBit() % replicateEltWidth + extractResultWidth <=
574 replicateEltWidth) {
575 replaceOpWithNewOpAndCopyNamehint<ExtractOp>(
576 rewriter, op, op.getType(), replicate.getOperand(),
577 op.getLowBit() % replicateEltWidth);
578 return true;
579 }
580
581 // We don't currently handle the case of extracting from non-whole elements,
582 // e.g. `extract (replicate 2-bit-thing, N), 1`.
583 return false;
584}
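// Illustrative examples: with r = replicate(x : i4, 4), a 16-bit value,
//   extract(r, lowBit=4) : i8 --> replicate(x, 2)      (element-aligned)
//   extract(r, lowBit=5) : i2 --> extract(x, lowBit=1) (within one element)
// A slice that straddles an element boundary without being aligned is left
// unchanged.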
585
586LogicalResult ExtractOp::canonicalize(ExtractOp op, PatternRewriter &rewriter) {
587 if (isOpTriviallyRecursive(op))
588 return failure();
589 auto *inputOp = op.getInput().getDefiningOp();
590
591 // This turns out to be incredibly expensive. Disable until performance is
592 // addressed.
593#if 0
594 // If the extracted bits are all known, then return the result.
595 auto knownBits = computeKnownBits(op.getInput())
596 .extractBits(cast<IntegerType>(op.getType()).getWidth(),
597 op.getLowBit());
598 if (knownBits.isConstant()) {
599 replaceOpWithNewOpAndCopyNamehint<hw::ConstantOp>(rewriter, op,
600 knownBits.getConstant());
601 return success();
602 }
603#endif
604
605 // extract(olo, extract(ilo, x)) = extract(olo + ilo, x)
606 if (auto innerExtract = dyn_cast_or_null<ExtractOp>(inputOp)) {
607 replaceOpWithNewOpAndCopyNamehint<ExtractOp>(
608 rewriter, op, op.getType(), innerExtract.getInput(),
609 innerExtract.getLowBit() + op.getLowBit());
610 return success();
611 }
612
613 // extract(lo, cat(a, b, c, d, e)) = cat(extract(lo1, b), c, extract(lo2, d))
614 if (auto innerCat = dyn_cast_or_null<ConcatOp>(inputOp))
615 return extractConcatToConcatExtract(op, innerCat, rewriter);
616
617 // extract(lo, replicate(a))
618 if (auto replicate = dyn_cast_or_null<ReplicateOp>(inputOp))
619 if (extractFromReplicate(op, replicate, rewriter))
620 return success();
621
622 // `extract(and(a, cst))` -> `extract(a)` when the relevant bits of the
623 // and/or/xor are not modifying the extracted bits.
624 if (inputOp && inputOp->getNumOperands() == 2 &&
625 isa<AndOp, OrOp, XorOp>(inputOp)) {
626 if (auto cstRHS = inputOp->getOperand(1).getDefiningOp<hw::ConstantOp>()) {
627 auto extractedCst = cstRHS.getValue().extractBits(
628 cast<IntegerType>(op.getType()).getWidth(), op.getLowBit());
629 if (isa<OrOp, XorOp>(inputOp) && extractedCst.isZero()) {
630 replaceOpWithNewOpAndCopyNamehint<ExtractOp>(
631 rewriter, op, op.getType(), inputOp->getOperand(0), op.getLowBit());
632 return success();
633 }
634
635 // `extract(and(a, cst))` -> `concat(extract(a), 0)` if we only need one
636 // extract to represent the result. Turning it into a pile of extracts is
637 // always fine by our cost model, but we don't want to explode things into
638 // a ton of bits because it will bloat the IR and generated Verilog.
639 if (isa<AndOp>(inputOp)) {
640 // For our cost model, we only do this if the bit pattern is a
641 // contiguous series of ones.
642 unsigned lz = extractedCst.countLeadingZeros();
643 unsigned tz = extractedCst.countTrailingZeros();
644 unsigned pop = extractedCst.popcount();
645 if (extractedCst.getBitWidth() - lz - tz == pop) {
646 auto resultTy = rewriter.getIntegerType(pop);
647 SmallVector<Value> resultElts;
648 if (lz)
649 resultElts.push_back(hw::ConstantOp::create(rewriter, op.getLoc(),
650 APInt::getZero(lz)));
651 resultElts.push_back(rewriter.createOrFold<ExtractOp>(
652 op.getLoc(), resultTy, inputOp->getOperand(0),
653 op.getLowBit() + tz));
654 if (tz)
655 resultElts.push_back(hw::ConstantOp::create(rewriter, op.getLoc(),
656 APInt::getZero(tz)));
657 replaceOpWithNewOpAndCopyNamehint<ConcatOp>(rewriter, op, resultElts);
658 return success();
659 }
660 }
661 }
662 }
663
664 // `extract(lowBit, shl(1, x))` -> `x == lowBit` when a single bit is
665 // extracted.
666 if (cast<IntegerType>(op.getType()).getWidth() == 1 && inputOp)
667 if (auto shlOp = dyn_cast<ShlOp>(inputOp)) {
668 // Don't canonicalize if the shift has multiple uses.
669 if (shlOp->hasOneUse())
670 if (auto lhsCst = shlOp.getLhs().getDefiningOp<hw::ConstantOp>())
671 if (lhsCst.getValue().isOne()) {
672 auto newCst = hw::ConstantOp::create(
673 rewriter, shlOp.getLoc(),
674 APInt(lhsCst.getValue().getBitWidth(), op.getLowBit()));
675 replaceOpWithNewOpAndCopyNamehint<ICmpOp>(
676 rewriter, op, ICmpPredicate::eq, shlOp->getOperand(1), newCst,
677 false);
678 return success();
679 }
680 }
681
682 return failure();
683}
684
685//===----------------------------------------------------------------------===//
686// Associative Variadic operations
687//===----------------------------------------------------------------------===//
688
689// Reduce all operands to a single value (either integer constant or parameter
690// expression) if all the operands are constants.
691static Attribute constFoldAssociativeOp(ArrayRef<Attribute> operands,
692 hw::PEO paramOpcode) {
693 assert(operands.size() > 1 && "caller should handle one-operand case");
694 // We can only fold anything in the case where all operands are known to be
695 // constants. Check the least common one first for an early out.
696 if (!operands[1] || !operands[0])
697 return {};
698
699 // This will fold to a simple constant if all operands are constant.
700 if (llvm::all_of(operands.drop_front(2),
701 [&](Attribute in) { return !!in; })) {
702 SmallVector<mlir::TypedAttr> typedOperands;
703 typedOperands.reserve(operands.size());
704 for (auto operand : operands) {
705 if (auto typedOperand = dyn_cast<mlir::TypedAttr>(operand))
706 typedOperands.push_back(typedOperand);
707 else
708 break;
709 }
710 if (typedOperands.size() == operands.size())
711 return hw::ParamExprAttr::get(paramOpcode, typedOperands);
712 }
713
714 return {};
715}
716
717/// When we find a logical operation (and, or, xor) with a constant e.g.
718/// `X & 42`, we want to push the constant into the computation of X if it leads
719/// to simplification.
720///
721/// This function handles the case where the logical operation has a concat
722/// operand. We check to see if we can simplify the concat, e.g. when it has
723/// constant operands.
724///
725/// This returns true when a simplification happens.
726static bool canonicalizeLogicalCstWithConcat(Operation *logicalOp,
727 size_t concatIdx, const APInt &cst,
728 PatternRewriter &rewriter) {
729 auto concatOp = logicalOp->getOperand(concatIdx).getDefiningOp<ConcatOp>();
730 assert((isa<AndOp, OrOp, XorOp>(logicalOp) && concatOp));
731
732 // Check to see if any operands can be simplified by pushing the logical op
733 // into all parts of the concat.
734 bool canSimplify =
735 llvm::any_of(concatOp->getOperands(), [&](Value operand) -> bool {
736 auto *operandOp = operand.getDefiningOp();
737 if (!operandOp)
738 return false;
739
740 // If the concat has a constant operand then we can transform this.
741 if (isa<hw::ConstantOp>(operandOp))
742 return true;
743 // If the concat has the same logical operation and that operation has
744 // a constant operand, then we can fold it into that suboperation.
745 return operandOp->getName() == logicalOp->getName() &&
746 operandOp->hasOneUse() && operandOp->getNumOperands() != 0 &&
747 operandOp->getOperands().back().getDefiningOp<hw::ConstantOp>();
748 });
749
750 if (!canSimplify)
751 return false;
752
753 // Create a new instance of the logical operation. We have to do this the
754 // hard way since we're generic across a family of different ops.
755 auto createLogicalOp = [&](ArrayRef<Value> operands) -> Value {
756 return createGenericOp(logicalOp->getLoc(), logicalOp->getName(), operands,
757 rewriter);
758 };
759
760 // Ok, let's do the transformation. We do this by slicing up the constant
761 // for each unit of the concat and duplicating the operation into the
762 // sub-operand.
763 SmallVector<Value> newConcatOperands;
764 newConcatOperands.reserve(concatOp->getNumOperands());
765
766 // Work from MSB to LSB.
767 size_t nextOperandBit = concatOp.getType().getIntOrFloatBitWidth();
768 for (Value operand : concatOp->getOperands()) {
769 size_t operandWidth = operand.getType().getIntOrFloatBitWidth();
770 nextOperandBit -= operandWidth;
771 // Take a slice of the constant.
772 auto eltCst =
773 hw::ConstantOp::create(rewriter, logicalOp->getLoc(),
774 cst.lshr(nextOperandBit).trunc(operandWidth));
775
776 newConcatOperands.push_back(createLogicalOp({operand, eltCst}));
777 }
778
779 // Create the concat, and the rest of the logical op if we need it.
780 Value newResult =
781 ConcatOp::create(rewriter, concatOp.getLoc(), newConcatOperands);
782
783 // If we had a variadic logical op on the top level, then recreate it with the
784 // new concat and without the constant operand.
785 if (logicalOp->getNumOperands() > 2) {
786 auto origOperands = logicalOp->getOperands();
787 SmallVector<Value> operands;
788 // Take any stuff before the concat.
789 operands.append(origOperands.begin(), origOperands.begin() + concatIdx);
790 // Take any stuff after the concat but before the constant.
791 operands.append(origOperands.begin() + concatIdx + 1,
792 origOperands.begin() + (origOperands.size() - 1));
793 // Include the new concat.
794 operands.push_back(newResult);
795 newResult = createLogicalOp(operands);
796 }
797
798 replaceOpAndCopyNamehint(rewriter, logicalOp, newResult);
799 return true;
800}
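// Worked example (illustrative): given
//   and(concat(a : i4, 0b1010), 0b00111100)
// the constant is sliced per concat element, producing
//   concat(and(a, 0b0011), and(0b1010, 0b1100))
// and the second element folds to the constant 0b1000, so the result is
//   concat(and(a, 0b0011), 0b1000).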
801
802// Determines whether the inputs to a logical operation include a pair of
803// opposite comparisons and can thus be lowered into a constant.
804static bool canCombineOppositeBinCmpIntoConstant(OperandRange operands) {
805 llvm::SmallDenseSet<std::tuple<ICmpPredicate, Value, Value>> seenPredicates;
806
807 for (auto op : operands) {
808 if (auto icmpOp = op.getDefiningOp<ICmpOp>();
809 icmpOp && icmpOp.getTwoState()) {
810 auto predicate = icmpOp.getPredicate();
811 auto lhs = icmpOp.getLhs();
812 auto rhs = icmpOp.getRhs();
813 if (seenPredicates.contains(
814 {ICmpOp::getNegatedPredicate(predicate), lhs, rhs}))
815 return true;
816
817 seenPredicates.insert({predicate, lhs, rhs});
818 }
819 }
820 return false;
821}
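// Illustrative example: a two-state icmp eq(a, b) and a two-state
// icmp ne(a, b) are exact negations of each other, so and-ing them always
// yields 0 and or-ing them always yields 1.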
822
823OpFoldResult AndOp::fold(FoldAdaptor adaptor) {
824 if (isOpTriviallyRecursive(*this))
825 return {};
826
827 APInt value = APInt::getAllOnes(cast<IntegerType>(getType()).getWidth());
828
829 auto inputs = adaptor.getInputs();
830
831 // and(x, 01, 10) -> 00 -- annulment.
832 for (auto operand : inputs) {
833 if (!operand)
834 continue;
835 value &= cast<IntegerAttr>(operand).getValue();
836 if (value.isZero())
837 return getIntAttr(value, getContext());
838 }
839
840 // and(x, -1) -> x.
841 if (inputs.size() == 2 && inputs[1] &&
842 cast<IntegerAttr>(inputs[1]).getValue().isAllOnes())
843 return getInputs()[0];
844
845 // and(x, x, x) -> x. This also handles and(x) -> x.
846 if (llvm::all_of(getInputs(),
847 [&](auto in) { return in == this->getInputs()[0]; }))
848 return getInputs()[0];
849
850 // and(..., x, ..., ~x, ...) -> 0
851 for (Value arg : getInputs()) {
852 Value subExpr;
853 if (matchPattern(arg, m_Complement(m_Any(&subExpr)))) {
854 for (Value arg2 : getInputs())
855 if (arg2 == subExpr)
856 return getIntAttr(
857 APInt::getZero(cast<IntegerType>(getType()).getWidth()),
858 getContext());
859 }
860 }
861
862 // x0 = icmp(pred, x, y)
863 // x1 = icmp(!pred, x, y)
864 // and(x0, x1) -> 0
865 if (canCombineOppositeBinCmpIntoConstant(getInputs()))
866 return getIntAttr(APInt::getZero(cast<IntegerType>(getType()).getWidth()),
867 getContext());
868
869 // Constant fold
870 return constFoldAssociativeOp(inputs, hw::PEO::And);
871}
872
873/// Returns a single common operand that all inputs of the operation `op` can
874/// be traced back to, or an empty `Value` if no such operand exists.
875///
876/// For example for `or(a[0], a[1], ..., a[n-1])` this function returns `a`
877/// (assuming the bit-width of `a` is `n`).
878template <typename Op>
879static Value getCommonOperand(Op op) {
880 if (!op.getType().isInteger(1))
881 return Value();
882
883 auto inputs = op.getInputs();
884 size_t size = inputs.size();
885
886 auto sourceOp = inputs[0].template getDefiningOp<ExtractOp>();
887 if (!sourceOp)
888 return Value();
889 Value source = sourceOp.getOperand();
890
891 // Bail out early if the number of inputs doesn't match the width of the source.
892 if (size != source.getType().getIntOrFloatBitWidth())
893 return Value();
894
895 // Tracks the bits that were encountered.
896 llvm::BitVector bits(size);
897 bits.set(sourceOp.getLowBit());
898
899 for (size_t i = 1; i != size; ++i) {
900 auto extractOp = inputs[i].template getDefiningOp<ExtractOp>();
901 if (!extractOp || extractOp.getOperand() != source)
902 return Value();
903 bits.set(extractOp.getLowBit());
904 }
905
906 return bits.all() ? source : Value();
907}
908
909/// Canonicalize an idempotent operation `op` so that each distinct input
910/// occurs at most once, looking through nested operations of the same kind.
911///
912/// Example: `and(x, y, x, z)` -> `and(x, y, z)`
913template <typename Op>
914static bool canonicalizeIdempotentInputs(Op op, PatternRewriter &rewriter) {
915 // Depth limit to search, in operations. Chosen arbitrarily, keep small.
916 constexpr unsigned limit = 3;
917 auto inputs = op.getInputs();
918
919 llvm::SmallSetVector<Value, 8> uniqueInputs(inputs.begin(), inputs.end());
920 llvm::SmallDenseSet<Op, 8> checked;
921 checked.insert(op);
922
923 struct OpWithDepth {
924 Op op;
925 unsigned depth;
926 };
927 llvm::SmallVector<OpWithDepth, 8> worklist;
928
929 auto enqueue = [&worklist, &checked, &op](Value input, unsigned depth) {
930 // Add to worklist if within depth limit, is defined in the same block by
931 // the same kind of operation, has same two-state-ness, and not enqueued
932 // previously.
933 if (depth < limit && input.getParentBlock() == op->getBlock()) {
934 auto inputOp = input.template getDefiningOp<Op>();
935 if (inputOp && inputOp.getTwoState() == op.getTwoState() &&
936 checked.insert(inputOp).second)
937 worklist.push_back({inputOp, depth + 1});
938 }
939 };
940
941 for (auto input : uniqueInputs)
942 enqueue(input, 0);
943
944 while (!worklist.empty()) {
945 auto item = worklist.pop_back_val();
946
947 for (auto input : item.op.getInputs()) {
948 uniqueInputs.remove(input);
949 enqueue(input, item.depth);
950 }
951 }
952
953 if (uniqueInputs.size() < inputs.size()) {
954 replaceOpWithNewOpAndCopyNamehint<Op>(rewriter, op, op.getType(),
955 uniqueInputs.getArrayRef(),
956 op.getTwoState());
957 return true;
958 }
959
960 return false;
961}
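// Illustrative example of the nested search: in and(x, y, and(x, z)) the outer
// operand x is already covered by the inner and, so the op is rewritten as
// and(y, and(x, z)).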
962
963LogicalResult AndOp::canonicalize(AndOp op, PatternRewriter &rewriter) {
964 if (isOpTriviallyRecursive(op))
965 return failure();
966
967 auto inputs = op.getInputs();
968 auto size = inputs.size();
969
970 // and(x, and(...)) -> and(x, ...) -- flatten
971 if (tryFlatteningOperands(op, rewriter))
972 return success();
973
974 // and(..., x, ..., x) -> and(..., x, ...) -- idempotent
975 // and(..., x, and(..., x, ...)) -> and(..., and(..., x, ...)) -- idempotent
976 // Trivial and(x), and(x, x) cases are handled by [AndOp::fold] above.
977 if (size > 1 && canonicalizeIdempotentInputs(op, rewriter))
978 return success();
979
980 assert(size > 1 && "expected 2 or more operands, `fold` should handle this");
981
982 // Patterns for and with a constant on RHS.
983 APInt value;
984 if (matchPattern(inputs.back(), m_ConstantInt(&value))) {
985 // and(..., '1) -> and(...) -- identity
986 if (value.isAllOnes()) {
987 replaceOpWithNewOpAndCopyNamehint<AndOp>(rewriter, op, op.getType(),
988 inputs.drop_back(), false);
989 return success();
990 }
991
992 // TODO: Combine multiple constants together even if they aren't at the
993 // end. and(..., c1, c2) -> and(..., c3) where c3 = c1 & c2 -- constant
994 // folding
995 APInt value2;
996 if (matchPattern(inputs[size - 2], m_ConstantInt(&value2))) {
997 auto cst = hw::ConstantOp::create(rewriter, op.getLoc(), value & value2);
998 SmallVector<Value, 4> newOperands(inputs.drop_back(/*n=*/2));
999 newOperands.push_back(cst);
1000 replaceOpWithNewOpAndCopyNamehint<AndOp>(rewriter, op, op.getType(),
1001 newOperands, false);
1002 return success();
1003 }
1004
1005 // Handle 'and' with a single bit constant on the RHS.
1006 if (size == 2 && value.isPowerOf2()) {
1007 // If the LHS is a replicate from a single bit, we can 'concat' it
1008 // into place. e.g.:
1009 // `replicate(x) & 4` -> `concat(zeros, x, zeros)`
1010 // TODO: Generalize this for non-single-bit operands.
1011 if (auto replicate = inputs[0].getDefiningOp<ReplicateOp>()) {
1012 auto replicateOperand = replicate.getOperand();
1013 if (replicateOperand.getType().isInteger(1)) {
1014 unsigned resultWidth = op.getType().getIntOrFloatBitWidth();
1015 auto trailingZeros = value.countTrailingZeros();
1016
1017 // Don't add zero bit constants unnecessarily.
1018 SmallVector<Value, 3> concatOperands;
1019 if (trailingZeros != resultWidth - 1) {
1020 auto highZeros = hw::ConstantOp::create(
1021 rewriter, op.getLoc(),
1022 APInt::getZero(resultWidth - trailingZeros - 1));
1023 concatOperands.push_back(highZeros);
1024 }
1025 concatOperands.push_back(replicateOperand);
1026 if (trailingZeros != 0) {
1027 auto lowZeros = hw::ConstantOp::create(
1028 rewriter, op.getLoc(), APInt::getZero(trailingZeros));
1029 concatOperands.push_back(lowZeros);
1030 }
1031 replaceOpWithNewOpAndCopyNamehint<ConcatOp>(
1032 rewriter, op, op.getType(), concatOperands);
1033 return success();
1034 }
1035 }
1036 }
1037
1038 // Narrow the op if the constant has leading or trailing zeros.
1039 //
1040 // and(a, 0b00101100) -> concat(0b00, and(extract(a), 0b1011), 0b00)
1041 unsigned leadingZeros = value.countLeadingZeros();
1042 unsigned trailingZeros = value.countTrailingZeros();
1043 if (leadingZeros > 0 || trailingZeros > 0) {
1044 unsigned maskLength = value.getBitWidth() - leadingZeros - trailingZeros;
1045
1046 // Extract the non-zero regions of the operands. Look through extracts.
1047 SmallVector<Value> operands;
1048 for (auto input : inputs.drop_back()) {
1049 unsigned offset = trailingZeros;
1050 while (auto extractOp = input.getDefiningOp<ExtractOp>()) {
1051 input = extractOp.getInput();
1052 offset += extractOp.getLowBit();
1053 }
1054 operands.push_back(ExtractOp::create(rewriter, op.getLoc(), input,
1055 offset, maskLength));
1056 }
1057
1058 // Add the narrowed mask if needed.
1059 auto narrowMask = value.extractBits(maskLength, trailingZeros);
1060 if (!narrowMask.isAllOnes())
1061 operands.push_back(hw::ConstantOp::create(
1062 rewriter, inputs.back().getLoc(), narrowMask));
1063
1064 // Create the narrow and op.
1065 Value narrowValue = operands.back();
1066 if (operands.size() > 1)
1067 narrowValue =
1068 AndOp::create(rewriter, op.getLoc(), operands, op.getTwoState());
1069 operands.clear();
1070
1071 // Concatenate the narrow and with the leading and trailing zeros.
1072 if (leadingZeros > 0)
1073 operands.push_back(hw::ConstantOp::create(
1074 rewriter, op.getLoc(), APInt::getZero(leadingZeros)));
1075 operands.push_back(narrowValue);
1076 if (trailingZeros > 0)
1077 operands.push_back(hw::ConstantOp::create(
1078 rewriter, op.getLoc(), APInt::getZero(trailingZeros)));
1079 replaceOpWithNewOpAndCopyNamehint<ConcatOp>(rewriter, op, operands);
1080 return success();
1081 }
1082
1083 // and(concat(x, cst1), a, b, c, cst2)
1084 // ==> and(a, b, c, concat(and(x,cst2'), and(cst1,cst2'')).
1085 // We do this even for multi-use concats since they are "just wiring".
1086 for (size_t i = 0; i < size - 1; ++i) {
1087 if (auto concat = inputs[i].getDefiningOp<ConcatOp>())
1088 if (canonicalizeLogicalCstWithConcat(op, i, value, rewriter))
1089 return success();
1090 }
1091 }
1092
1093 // extracts only of and(...) -> and(extract()...)
1094 if (narrowOperationWidth(op, true, rewriter))
1095 return success();
1096
1097 // and(a[0], a[1], ..., a[n]) -> icmp eq(a, -1)
1098 if (auto source = getCommonOperand(op)) {
1099 auto cmpAgainst =
1100 hw::ConstantOp::create(rewriter, op.getLoc(), APInt::getAllOnes(size));
1101 replaceOpWithNewOpAndCopyNamehint<ICmpOp>(rewriter, op, ICmpPredicate::eq,
1102 source, cmpAgainst);
1103 return success();
1104 }
1105
1106 /// TODO: and(..., x, not(x)) -> and(..., 0) -- complement
1107 return failure();
1108}
1109
1110OpFoldResult OrOp::fold(FoldAdaptor adaptor) {
1111 if (isOpTriviallyRecursive(*this))
1112 return {};
1113
1114 auto value = APInt::getZero(cast<IntegerType>(getType()).getWidth());
1115 auto inputs = adaptor.getInputs();
1116 // or(x, 10, 01) -> 11
1117 for (auto operand : inputs) {
1118 if (!operand)
1119 continue;
1120 value |= cast<IntegerAttr>(operand).getValue();
1121 if (value.isAllOnes())
1122 return getIntAttr(value, getContext());
1123 }
1124
1125 // or(x, 0) -> x
1126 if (inputs.size() == 2 && inputs[1] &&
1127 cast<IntegerAttr>(inputs[1]).getValue().isZero())
1128 return getInputs()[0];
1129
1130 // or(x, x, x) -> x. This also handles or(x) -> x
1131 if (llvm::all_of(getInputs(),
1132 [&](auto in) { return in == this->getInputs()[0]; }))
1133 return getInputs()[0];
1134
1135 // or(..., x, ..., ~x, ...) -> -1
1136 for (Value arg : getInputs()) {
1137 Value subExpr;
1138 if (matchPattern(arg, m_Complement(m_Any(&subExpr)))) {
1139 for (Value arg2 : getInputs())
1140 if (arg2 == subExpr)
1141 return getIntAttr(
1142 APInt::getAllOnes(cast<IntegerType>(getType()).getWidth()),
1143 getContext());
1144 }
1145 }
1146
1147 // x0 = icmp(pred, x, y)
1148 // x1 = icmp(!pred, x, y)
1149 // or(x0, x1) -> 1
1150 if (canCombineOppositeBinCmpIntoConstant(getInputs()))
1151 return getIntAttr(
1152 APInt::getAllOnes(cast<IntegerType>(getType()).getWidth()),
1153 getContext());
1154
1155 // Constant fold
1156 return constFoldAssociativeOp(inputs, hw::PEO::Or);
1157}
1158
1159LogicalResult OrOp::canonicalize(OrOp op, PatternRewriter &rewriter) {
1160 if (isOpTriviallyRecursive(op))
1161 return failure();
1162
1163 auto inputs = op.getInputs();
1164 auto size = inputs.size();
1165
1166 // or(x, or(...)) -> or(x, ...) -- flatten
1167 if (tryFlatteningOperands(op, rewriter))
1168 return success();
1169
1170 // or(..., x, ..., x, ...) -> or(..., x) -- idempotent
1171 // or(..., x, or(..., x, ...)) -> or(..., or(..., x, ...)) -- idempotent
1172 // Trivial or(x), or(x, x) cases are handled by [OrOp::fold].
1173 if (size > 1 && canonicalizeIdempotentInputs(op, rewriter))
1174 return success();
1175
1176 assert(size > 1 && "expected 2 or more operands");
1177
1178 // Patterns for or with a constant on RHS.
1179 APInt value;
1180 if (matchPattern(inputs.back(), m_ConstantInt(&value))) {
1181 // or(..., '0) -> or(...) -- identity
1182 if (value.isZero()) {
1183 replaceOpWithNewOpAndCopyNamehint<OrOp>(rewriter, op, op.getType(),
1184 inputs.drop_back());
1185 return success();
1186 }
1187
1188 // or(..., c1, c2) -> or(..., c3) where c3 = c1 | c2 -- constant folding
1189 APInt value2;
1190 if (matchPattern(inputs[size - 2], m_ConstantInt(&value2))) {
1191 auto cst = hw::ConstantOp::create(rewriter, op.getLoc(), value | value2);
1192 SmallVector<Value, 4> newOperands(inputs.drop_back(/*n=*/2));
1193 newOperands.push_back(cst);
1194 replaceOpWithNewOpAndCopyNamehint<OrOp>(rewriter, op, op.getType(),
1195 newOperands);
1196 return success();
1197 }
1198
1199 // or(concat(x, cst1), a, b, c, cst2)
1200 // ==> or(a, b, c, concat(or(x,cst2'), or(cst1,cst2'')).
1201 // We do this even for multi-use concats since they are "just wiring".
1202 for (size_t i = 0; i < size - 1; ++i) {
1203 if (auto concat = inputs[i].getDefiningOp<ConcatOp>())
1204 if (canonicalizeLogicalCstWithConcat(op, i, value, rewriter))
1205 return success();
1206 }
1207 }
1208
1209 // extracts only of or(...) -> or(extract()...)
1210 if (narrowOperationWidth(op, true, rewriter))
1211 return success();
1212
1213 // or(a[0], a[1], ..., a[n]) -> icmp ne(a, 0)
1214 if (auto source = getCommonOperand(op)) {
1215 auto cmpAgainst =
1216 hw::ConstantOp::create(rewriter, op.getLoc(), APInt::getZero(size));
1217 replaceOpWithNewOpAndCopyNamehint<ICmpOp>(rewriter, op, ICmpPredicate::ne,
1218 source, cmpAgainst);
1219 return success();
1220 }
1221
1222 // or(mux(c_1, a, 0), mux(c_2, a, 0), ..., mux(c_n, a, 0)) -> mux(or(c_1, c_2,
1223 // .., c_n), a, 0)
1224 if (auto firstMux = op.getOperand(0).getDefiningOp<comb::MuxOp>()) {
1225 APInt value;
1226 if (op.getTwoState() && firstMux.getTwoState() &&
1227 matchPattern(firstMux.getFalseValue(), m_ConstantInt(&value)) &&
1228 value.isZero()) {
1229 SmallVector<Value> conditions{firstMux.getCond()};
1230 auto check = [&](Value v) {
1231 auto mux = v.getDefiningOp<comb::MuxOp>();
1232 if (!mux)
1233 return false;
1234 conditions.push_back(mux.getCond());
1235 return mux.getTwoState() &&
1236 firstMux.getTrueValue() == mux.getTrueValue() &&
1237 firstMux.getFalseValue() == mux.getFalseValue();
1238 };
1239 if (llvm::all_of(op.getOperands().drop_front(), check)) {
1240 auto cond = comb::OrOp::create(rewriter, op.getLoc(), conditions, true);
1241 replaceOpWithNewOpAndCopyNamehint<comb::MuxOp>(
1242 rewriter, op, cond, firstMux.getTrueValue(),
1243 firstMux.getFalseValue(), true);
1244 return success();
1245 }
1246 }
1247 }
1248
1249 /// TODO: or(..., x, not(x)) -> or(..., '1) -- complement
1250 return failure();
1251}
1252
1253OpFoldResult XorOp::fold(FoldAdaptor adaptor) {
1254 if (isOpTriviallyRecursive(*this))
1255 return {};
1256
1257 auto size = getInputs().size();
1258 auto inputs = adaptor.getInputs();
1259
1260 // xor(x) -> x -- noop
1261 if (size == 1)
1262 return getInputs()[0];
1263
1264 // xor(x, x) -> 0 -- idempotent
1265 if (size == 2 && getInputs()[0] == getInputs()[1])
1266 return IntegerAttr::get(getType(), 0);
1267
1268 // xor(x, 0) -> x
1269 if (inputs.size() == 2 && inputs[1] &&
1270 cast<IntegerAttr>(inputs[1]).getValue().isZero())
1271 return getInputs()[0];
1272
1273 // xor(xor(x,1),1) -> x
1274 // but not a self-loop
1275 if (isBinaryNot()) {
1276 Value subExpr;
1277 if (matchPattern(getOperand(0), m_Complement(m_Any(&subExpr))) &&
1278 subExpr != getResult())
1279 return subExpr;
1280 }
1281
1282 // Constant fold
1283 return constFoldAssociativeOp(inputs, hw::PEO::Xor);
1284}
1285
1286// xor(icmp, a, b, 1) -> xor(!icmp, a, b) if icmp has one user.
1287static void canonicalizeXorIcmpTrue(XorOp op, unsigned icmpOperand,
1288 PatternRewriter &rewriter) {
1289 auto icmp = op.getOperand(icmpOperand).getDefiningOp<ICmpOp>();
1290 auto negatedPred = ICmpOp::getNegatedPredicate(icmp.getPredicate());
1291
1292 Value result =
1293 ICmpOp::create(rewriter, icmp.getLoc(), negatedPred, icmp.getOperand(0),
1294 icmp.getOperand(1), icmp.getTwoState());
1295
1296 // If the xor had other operands, rebuild it.
1297 if (op.getNumOperands() > 2) {
1298 SmallVector<Value, 4> newOperands(op.getOperands());
1299 newOperands.pop_back();
1300 newOperands.erase(newOperands.begin() + icmpOperand);
1301 newOperands.push_back(result);
1302 result =
1303 XorOp::create(rewriter, op.getLoc(), newOperands, op.getTwoState());
1304 }
1305
1306 replaceOpAndCopyNamehint(rewriter, op, result);
1307}
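// Illustrative example: xor(icmp eq(a, b), 1) becomes icmp ne(a, b); for a
// wider xor such as xor(x, icmp eq(a, b), 1), the icmp and the trailing 1 are
// removed and the negated compare is appended, giving xor(x, icmp ne(a, b)).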
1308
1309LogicalResult XorOp::canonicalize(XorOp op, PatternRewriter &rewriter) {
1310 if (isOpTriviallyRecursive(op))
1311 return failure();
1312
1313 auto inputs = op.getInputs();
1314 auto size = inputs.size();
1315 assert(size > 1 && "expected 2 or more operands");
1316
1317 // xor(..., x, x) -> xor (...) -- idempotent
1318 if (inputs[size - 1] == inputs[size - 2]) {
1319 assert(size > 2 &&
1320 "expected idempotent case for 2 elements handled already.");
1321 replaceOpWithNewOpAndCopyNamehint<XorOp>(rewriter, op, op.getType(),
1322 inputs.drop_back(/*n=*/2), false);
1323 return success();
1324 }
1325
1326 // Patterns for xor with a constant on RHS.
1327 APInt value;
1328 if (matchPattern(inputs.back(), m_ConstantInt(&value))) {
1329 // xor(..., 0) -> xor(...) -- identity
1330 if (value.isZero()) {
1331 replaceOpWithNewOpAndCopyNamehint<XorOp>(rewriter, op, op.getType(),
1332 inputs.drop_back(), false);
1333 return success();
1334 }
1335
1336 // xor(..., c1, c2) -> xor(..., c3) where c3 = c1 ^ c2.
1337 APInt value2;
1338 if (matchPattern(inputs[size - 2], m_ConstantInt(&value2))) {
1339 auto cst = hw::ConstantOp::create(rewriter, op.getLoc(), value ^ value2);
1340 SmallVector<Value, 4> newOperands(inputs.drop_back(/*n=*/2));
1341 newOperands.push_back(cst);
1342 replaceOpWithNewOpAndCopyNamehint<XorOp>(rewriter, op, op.getType(),
1343 newOperands, false);
1344 return success();
1345 }
1346
1347 bool isSingleBit = value.getBitWidth() == 1;
1348
1349 // Check for subexpressions that we can simplify.
1350 for (size_t i = 0; i < size - 1; ++i) {
1351 Value operand = inputs[i];
1352
1353 // xor(concat(x, cst1), a, b, c, cst2)
1354 // ==> xor(a, b, c, concat(xor(x,cst2'), xor(cst1,cst2'')).
1355 // We do this even for multi-use concats since they are "just
1356 // wiring".
1357 if (auto concat = operand.getDefiningOp<ConcatOp>())
1358 if (canonicalizeLogicalCstWithConcat(op, i, value, rewriter))
1359 return success();
1360
1361 // xor(icmp, a, b, 1) -> xor(!icmp, a, b) if icmp has one user.
1362 if (isSingleBit && operand.hasOneUse()) {
1363 assert(value == 1 && "single bit constant has to be one if not zero");
1364 if (auto icmp = operand.getDefiningOp<ICmpOp>())
1365 return canonicalizeXorIcmpTrue(op, i, rewriter), success();
1366 }
1367 }
1368 }
1369
1370 // xor(x, xor(...)) -> xor(x, ...) -- flatten
1371 if (tryFlatteningOperands(op, rewriter))
1372 return success();
1373
1374 // extracts only of xor(...) -> xor(extract()...)
1375 if (narrowOperationWidth(op, true, rewriter))
1376 return success();
1377
1378 // xor(a[0], a[1], ..., a[n]) -> parity(a)
1379 if (auto source = getCommonOperand(op)) {
1380 replaceOpWithNewOpAndCopyNamehint<ParityOp>(rewriter, op, source);
1381 return success();
1382 }
1383
1384 return failure();
1385}
1386
1387OpFoldResult SubOp::fold(FoldAdaptor adaptor) {
1388 if (isOpTriviallyRecursive(*this))
1389 return {};
1390
1391 // sub(x - x) -> 0
1392 if (getRhs() == getLhs())
1393 return getIntAttr(
1394 APInt::getZero(getLhs().getType().getIntOrFloatBitWidth()),
1395 getContext());
1396
1397 if (adaptor.getRhs()) {
1398 // If both are constants, we can unconditionally fold.
1399 if (adaptor.getLhs()) {
1400 // Constant fold (c1 - c2) => (c1 + -1*c2).
1401 auto negOne = getIntAttr(
1402 APInt::getAllOnes(getLhs().getType().getIntOrFloatBitWidth()),
1403 getContext());
1404 auto rhsNeg = hw::ParamExprAttr::get(
1405 hw::PEO::Mul, cast<TypedAttr>(adaptor.getRhs()), negOne);
1406 return hw::ParamExprAttr::get(hw::PEO::Add,
1407 cast<TypedAttr>(adaptor.getLhs()), rhsNeg);
1408 }
1409
1410 // sub(x - 0) -> x
1411 if (auto rhsC = dyn_cast<IntegerAttr>(adaptor.getRhs())) {
1412 if (rhsC.getValue().isZero())
1413 return getLhs();
1414 }
1415 }
1416
1417 return {};
1418}
1419
1420LogicalResult SubOp::canonicalize(SubOp op, PatternRewriter &rewriter) {
1421 if (isOpTriviallyRecursive(op))
1422 return failure();
1423
1424 // sub(x, cst) -> add(x, -cst)
1425 APInt value;
1426 if (matchPattern(op.getRhs(), m_ConstantInt(&value))) {
1427 auto negCst = hw::ConstantOp::create(rewriter, op.getLoc(), -value);
1428 replaceOpWithNewOpAndCopyNamehint<AddOp>(rewriter, op, op.getLhs(), negCst,
1429 false);
1430 return success();
1431 }
1432
1433 // extracts only of sub(...) -> sub(extract()...)
1434 if (narrowOperationWidth(op, false, rewriter))
1435 return success();
1436
1437 return failure();
1438}
1439
1440OpFoldResult AddOp::fold(FoldAdaptor adaptor) {
1441 if (isOpTriviallyRecursive(*this))
1442 return {};
1443
1444 auto size = getInputs().size();
1445
1446 // add(x) -> x -- noop
1447 if (size == 1u)
1448 return getInputs()[0];
1449
1450 // Constant fold constant operands.
1451 return constFoldAssociativeOp(adaptor.getOperands(), hw::PEO::Add);
1452}
1453
1454LogicalResult AddOp::canonicalize(AddOp op, PatternRewriter &rewriter) {
1455 if (isOpTriviallyRecursive(op))
1456 return failure();
1457
1458 auto inputs = op.getInputs();
1459 auto size = inputs.size();
1460 assert(size > 1 && "expected 2 or more operands");
1461
1462 APInt value, value2;
1463
1464 // add(..., 0) -> add(...) -- identity
1465 if (matchPattern(inputs.back(), m_ConstantInt(&value)) && value.isZero()) {
1466 replaceOpWithNewOpAndCopyNamehint<AddOp>(rewriter, op, op.getType(),
1467 inputs.drop_back(), false);
1468 return success();
1469 }
1470
1471 // add(..., c1, c2) -> add(..., c3) where c3 = c1 + c2 -- constant folding
1472 if (matchPattern(inputs[size - 1], m_ConstantInt(&value)) &&
1473 matchPattern(inputs[size - 2], m_ConstantInt(&value2))) {
1474 auto cst = hw::ConstantOp::create(rewriter, op.getLoc(), value + value2);
1475 SmallVector<Value, 4> newOperands(inputs.drop_back(/*n=*/2));
1476 newOperands.push_back(cst);
1477 replaceOpWithNewOpAndCopyNamehint<AddOp>(rewriter, op, op.getType(),
1478 newOperands, false);
1479 return success();
1480 }
1481
1482 // add(..., x, x) -> add(..., shl(x, 1))
1483 if (inputs[size - 1] == inputs[size - 2]) {
1484 SmallVector<Value, 4> newOperands(inputs.drop_back(/*n=*/2));
1485
1486 auto one = hw::ConstantOp::create(rewriter, op.getLoc(), op.getType(), 1);
1487 auto shiftLeftOp =
1488 comb::ShlOp::create(rewriter, op.getLoc(), inputs.back(), one, false);
1489
1490 newOperands.push_back(shiftLeftOp);
1491 replaceOpWithNewOpAndCopyNamehint<AddOp>(rewriter, op, op.getType(),
1492 newOperands, false);
1493 return success();
1494 }
1495
1496 auto shlOp = inputs[size - 1].getDefiningOp<comb::ShlOp>();
1497 // add(..., x, shl(x, c)) -> add(..., mul(x, (1 << c) + 1))
1498 if (shlOp && shlOp.getLhs() == inputs[size - 2] &&
1499 matchPattern(shlOp.getRhs(), m_ConstantInt(&value))) {
1500
1501 APInt one(/*numBits=*/value.getBitWidth(), 1, /*isSigned=*/false);
1502 auto rhs =
1503 hw::ConstantOp::create(rewriter, op.getLoc(), (one << value) + one);
1504
1505 std::array<Value, 2> factors = {shlOp.getLhs(), rhs};
1506 auto mulOp = comb::MulOp::create(rewriter, op.getLoc(), factors, false);
1507
1508 SmallVector<Value, 4> newOperands(inputs.drop_back(/*n=*/2));
1509 newOperands.push_back(mulOp);
1510 replaceOpWithNewOpAndCopyNamehint<AddOp>(rewriter, op, op.getType(),
1511 newOperands, false);
1512 return success();
1513 }
1514
1515 auto mulOp = inputs[size - 1].getDefiningOp<comb::MulOp>();
1516 // add(..., x, mul(x, c)) -> add(..., mul(x, c + 1))
1517 if (mulOp && mulOp.getInputs().size() == 2 &&
1518 mulOp.getInputs()[0] == inputs[size - 2] &&
1519 matchPattern(mulOp.getInputs()[1], m_ConstantInt(&value))) {
1520
1521 APInt one(/*numBits=*/value.getBitWidth(), 1, /*isSigned=*/false);
1522 auto rhs = hw::ConstantOp::create(rewriter, op.getLoc(), value + one);
1523 std::array<Value, 2> factors = {mulOp.getInputs()[0], rhs};
1524 auto newMulOp = comb::MulOp::create(rewriter, op.getLoc(), factors, false);
1525
1526 SmallVector<Value, 4> newOperands(inputs.drop_back(/*n=*/2));
1527 newOperands.push_back(newMulOp);
1528 replaceOpWithNewOpAndCopyNamehint<AddOp>(rewriter, op, op.getType(),
1529 newOperands, false);
1530 return success();
1531 }
1532
1533 // add(a, add(...)) -> add(a, ...) -- flatten
1534 if (tryFlatteningOperands(op, rewriter))
1535 return success();
1536
1537 // extracts only of add(...) -> add(extract()...)
1538 if (narrowOperationWidth(op, false, rewriter))
1539 return success();
1540
1541 // add(add(x, c1), c2) -> add(x, c1 + c2)
1542 auto addOp = inputs[0].getDefiningOp<comb::AddOp>();
1543 if (addOp && addOp.getInputs().size() == 2 &&
1544 matchPattern(addOp.getInputs()[1], m_ConstantInt(&value2)) &&
1545 inputs.size() == 2 && matchPattern(inputs[1], m_ConstantInt(&value))) {
1546
1547 auto rhs = hw::ConstantOp::create(rewriter, op.getLoc(), value + value2);
1548 replaceOpWithNewOpAndCopyNamehint<AddOp>(
1549 rewriter, op, op.getType(), ArrayRef<Value>{addOp.getInputs()[0], rhs},
1550 /*twoState=*/op.getTwoState() && addOp.getTwoState());
1551 return success();
1552 }
1553
1554 return failure();
1555}
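// Quick sanity check of the strength-reduction patterns above (illustrative):
// add(x, x) is rewritten using shl(x, 1) since x + x = 2x; add(x, shl(x, 2))
// becomes mul(x, 5) since x + 4x = 5x; and add(x, mul(x, 6)) becomes mul(x, 7)
// since x + 6x = 7x.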
1556
1557OpFoldResult MulOp::fold(FoldAdaptor adaptor) {
1558 if (isOpTriviallyRecursive(*this))
1559 return {};
1560
1561 auto size = getInputs().size();
1562 auto inputs = adaptor.getInputs();
1563
1564 // mul(x) -> x -- noop
1565 if (size == 1u)
1566 return getInputs()[0];
1567
1568 auto width = cast<IntegerType>(getType()).getWidth();
1569 if (width == 0)
1570 return getIntAttr(APInt::getZero(0), getContext());
1571
1572 APInt value(/*numBits=*/width, 1, /*isSigned=*/false);
1573
1574 // mul(x, 0, 1) -> 0 -- annulment
1575 for (auto operand : inputs) {
1576 if (!operand)
1577 continue;
1578 value *= cast<IntegerAttr>(operand).getValue();
1579 if (value.isZero())
1580 return getIntAttr(value, getContext());
1581 }
1582
1583 // Constant fold
1584 return constFoldAssociativeOp(inputs, hw::PEO::Mul);
1585}
1586
1587LogicalResult MulOp::canonicalize(MulOp op, PatternRewriter &rewriter) {
1588 if (isOpTriviallyRecursive(op))
1589 return failure();
1590
1591 auto inputs = op.getInputs();
1592 auto size = inputs.size();
1593 assert(size > 1 && "expected 2 or more operands");
1594
1595 APInt value, value2;
1596
1597 // mul(x, c) -> shl(x, log2(c)), where c is a power of two.
1598 if (size == 2 && matchPattern(inputs.back(), m_ConstantInt(&value)) &&
1599 value.isPowerOf2()) {
1600 auto shift = hw::ConstantOp::create(rewriter, op.getLoc(), op.getType(),
1601 value.exactLogBase2());
1602 auto shlOp =
1603 comb::ShlOp::create(rewriter, op.getLoc(), inputs[0], shift, false);
1604
1605 replaceOpWithNewOpAndCopyNamehint<MulOp>(rewriter, op, op.getType(),
1606 ArrayRef<Value>(shlOp), false);
1607 return success();
1608 }
1609
1610 // mul(..., 1) -> mul(...) -- identity
1611 if (matchPattern(inputs.back(), m_ConstantInt(&value)) && value.isOne()) {
1612 replaceOpWithNewOpAndCopyNamehint<MulOp>(rewriter, op, op.getType(),
1613 inputs.drop_back());
1614 return success();
1615 }
1616
1617 // mul(..., c1, c2) -> mul(..., c3) where c3 = c1 * c2 -- constant folding
1618 if (matchPattern(inputs[size - 1], m_ConstantInt(&value)) &&
1619 matchPattern(inputs[size - 2], m_ConstantInt(&value2))) {
1620 auto cst = hw::ConstantOp::create(rewriter, op.getLoc(), value * value2);
1621 SmallVector<Value, 4> newOperands(inputs.drop_back(/*n=*/2));
1622 newOperands.push_back(cst);
1623 replaceOpWithNewOpAndCopyNamehint<MulOp>(rewriter, op, op.getType(),
1624 newOperands);
1625 return success();
1626 }
1627
1628 // mul(a, mul(...)) -> mul(a, ...) -- flatten
1629 if (tryFlatteningOperands(op, rewriter))
1630 return success();
1631
1632 // extracts only of mul(...) -> mul(extract()...)
1633 if (narrowOperationWidth(op, false, rewriter))
1634 return success();
1635
1636 return failure();
1637}
1638
1639template <class Op, bool isSigned>
1640static OpFoldResult foldDiv(Op op, ArrayRef<Attribute> constants) {
1641 if (auto rhsValue = dyn_cast_or_null<IntegerAttr>(constants[1])) {
1642 // divu(x, 1) -> x, divs(x, 1) -> x
1643 if (rhsValue.getValue() == 1)
1644 return op.getLhs();
1645
1646 // If the divisor is zero, do not fold for now.
1647 if (rhsValue.getValue().isZero())
1648 return {};
1649 }
1650
1651 return constFoldBinaryOp(constants, isSigned ? hw::PEO::DivS : hw::PEO::DivU);
1652}
1653
1654OpFoldResult DivUOp::fold(FoldAdaptor adaptor) {
1655 if (isOpTriviallyRecursive(*this))
1656 return {};
1657 return foldDiv<DivUOp, /*isSigned=*/false>(*this, adaptor.getOperands());
1658}
1659
1660OpFoldResult DivSOp::fold(FoldAdaptor adaptor) {
1661 if (isOpTriviallyRecursive(*this))
1662 return {};
1663 return foldDiv<DivSOp, /*isSigned=*/true>(*this, adaptor.getOperands());
1664}
1665
1666template <class Op, bool isSigned>
1667static OpFoldResult foldMod(Op op, ArrayRef<Attribute> constants) {
1668 if (auto rhsValue = dyn_cast_or_null<IntegerAttr>(constants[1])) {
1669 // modu(x, 1) -> 0, mods(x, 1) -> 0
1670 if (rhsValue.getValue() == 1)
1671 return getIntAttr(APInt::getZero(op.getType().getIntOrFloatBitWidth()),
1672 op.getContext());
1673
1674 // If the divisor is zero, do not fold for now.
1675 if (rhsValue.getValue().isZero())
1676 return {};
1677 }
1678
1679 if (auto lhsValue = dyn_cast_or_null<IntegerAttr>(constants[0])) {
1680 // modu(0, x) -> 0, mods(0, x) -> 0
1681 if (lhsValue.getValue().isZero())
1682 return getIntAttr(APInt::getZero(op.getType().getIntOrFloatBitWidth()),
1683 op.getContext());
1684 }
1685
1686 return constFoldBinaryOp(constants, isSigned ? hw::PEO::ModS : hw::PEO::ModU);
1687}
1688
1689OpFoldResult ModUOp::fold(FoldAdaptor adaptor) {
1690 if (isOpTriviallyRecursive(*this))
1691 return {};
1692 return foldMod<ModUOp, /*isSigned=*/false>(*this, adaptor.getOperands());
1693}
1694
1695OpFoldResult ModSOp::fold(FoldAdaptor adaptor) {
1696 if (isOpTriviallyRecursive(*this))
1697 return {};
1698 return foldMod<ModSOp, /*isSigned=*/true>(*this, adaptor.getOperands());
1699}
1700
1701LogicalResult DivUOp::canonicalize(DivUOp op, PatternRewriter &rewriter) {
1702 if (isOpTriviallyRecursive(op) || !op.getTwoState())
1703 return failure();
1704 return convertDivUByPowerOfTwo(op, rewriter);
1705}
1706
1707LogicalResult ModUOp::canonicalize(ModUOp op, PatternRewriter &rewriter) {
1708 if (isOpTriviallyRecursive(op) || !op.getTwoState())
1709 return failure();
1710
1711 return convertModUByPowerOfTwo(op, rewriter);
1712}
1713
1714//===----------------------------------------------------------------------===//
1715// ConcatOp
1716//===----------------------------------------------------------------------===//
1717
1718// Constant folding
1719OpFoldResult ConcatOp::fold(FoldAdaptor adaptor) {
1720 if (isOpTriviallyRecursive(*this))
1721 return {};
1722
1723 if (getNumOperands() == 1)
1724 return getOperand(0);
1725
1726 // If all the operands are constant, we can fold.
1727 for (auto attr : adaptor.getInputs())
1728 if (!attr || !isa<IntegerAttr>(attr))
1729 return {};
1730
1731 // If we got here, we can constant fold.
1732 unsigned resultWidth = getType().getIntOrFloatBitWidth();
1733 APInt result(resultWidth, 0);
1734
1735 unsigned nextInsertion = resultWidth;
1736 // Insert each chunk into the result.
1737 for (auto attr : adaptor.getInputs()) {
1738 auto chunk = cast<IntegerAttr>(attr).getValue();
1739 nextInsertion -= chunk.getBitWidth();
1740 result.insertBits(chunk, nextInsertion);
1741 }
1742
1743 return getIntAttr(result, getContext());
1744}
1745
1746LogicalResult ConcatOp::canonicalize(ConcatOp op, PatternRewriter &rewriter) {
1747 if (isOpTriviallyRecursive(op))
1748 return failure();
1749
1750 auto inputs = op.getInputs();
1751 auto size = inputs.size();
1752 assert(size > 1 && "expected 2 or more operands");
1753
1754 // This function is used when we flatten neighboring operands of a
1755 // (variadic) concat into a new version of the concat. First/last indices
1756 // are inclusive.
1757 auto flattenConcat = [&](size_t firstOpIndex, size_t lastOpIndex,
1758 ValueRange replacements) -> LogicalResult {
1759 SmallVector<Value, 4> newOperands;
1760 newOperands.append(inputs.begin(), inputs.begin() + firstOpIndex);
1761 newOperands.append(replacements.begin(), replacements.end());
1762 newOperands.append(inputs.begin() + lastOpIndex + 1, inputs.end());
1763 if (newOperands.size() == 1)
1764 replaceOpAndCopyNamehint(rewriter, op, newOperands[0]);
1765 else
1766 replaceOpWithNewOpAndCopyNamehint<ConcatOp>(rewriter, op, op.getType(),
1767 newOperands);
1768 return success();
1769 };
1770
1771 Value commonOperand = inputs[0];
1772 for (size_t i = 0; i != size; ++i) {
1773 // Check to see if all operands are the same.
1774 if (inputs[i] != commonOperand)
1775 commonOperand = Value();
1776
1777 // If an operand to the concat is itself a concat, then we can fold them
1778 // together.
1779 if (auto subConcat = inputs[i].getDefiningOp<ConcatOp>())
1780 return flattenConcat(i, i, subConcat->getOperands());
1781
1782 // Check for canonicalization due to neighboring operands.
1783 if (i != 0) {
1784 // Merge neighboring constants.
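// E.g. concat(..., 2'b01, 3'b100, ...) -> concat(..., 5'b01100, ...).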
1785 if (auto cst = inputs[i].getDefiningOp<hw::ConstantOp>()) {
1786 if (auto prevCst = inputs[i - 1].getDefiningOp<hw::ConstantOp>()) {
1787 unsigned prevWidth = prevCst.getValue().getBitWidth();
1788 unsigned thisWidth = cst.getValue().getBitWidth();
1789 auto resultCst = cst.getValue().zext(prevWidth + thisWidth);
1790 resultCst |= prevCst.getValue().zext(prevWidth + thisWidth)
1791 << thisWidth;
1792 Value replacement =
1793 hw::ConstantOp::create(rewriter, op.getLoc(), resultCst);
1794 return flattenConcat(i - 1, i, replacement);
1795 }
1796 }
1797
1798 // If the two operands are the same, turn them into a replicate.
1799 if (inputs[i] == inputs[i - 1]) {
1800 Value replacement =
1801 rewriter.createOrFold<ReplicateOp>(op.getLoc(), inputs[i], 2);
1802 return flattenConcat(i - 1, i, replacement);
1803 }
1804
1805 // If this input is a replicate, see if we can fold it with the previous
1806 // one.
1807 if (auto repl = inputs[i].getDefiningOp<ReplicateOp>()) {
1808 // ... x, repl(x, n), ... ==> ..., repl(x, n+1), ...
1809 if (repl.getOperand() == inputs[i - 1]) {
1810 Value replacement = rewriter.createOrFold<ReplicateOp>(
1811 op.getLoc(), repl.getOperand(), repl.getMultiple() + 1);
1812 return flattenConcat(i - 1, i, replacement);
1813 }
1814 // ... repl(x, n), repl(x, m), ... ==> ..., repl(x, n+m), ...
1815 if (auto prevRepl = inputs[i - 1].getDefiningOp<ReplicateOp>()) {
1816 if (prevRepl.getOperand() == repl.getOperand()) {
1817 Value replacement = rewriter.createOrFold<ReplicateOp>(
1818 op.getLoc(), repl.getOperand(),
1819 repl.getMultiple() + prevRepl.getMultiple());
1820 return flattenConcat(i - 1, i, replacement);
1821 }
1822 }
1823 }
1824
1825 // ... repl(x, n), x, ... ==> ..., repl(x, n+1), ...
1826 if (auto repl = inputs[i - 1].getDefiningOp<ReplicateOp>()) {
1827 if (repl.getOperand() == inputs[i]) {
1828 Value replacement = rewriter.createOrFold<ReplicateOp>(
1829 op.getLoc(), inputs[i], repl.getMultiple() + 1);
1830 return flattenConcat(i - 1, i, replacement);
1831 }
1832 }
1833
1834 // Merge neighboring extracts of neighboring inputs, e.g.
1835 // {A[3], A[2]} -> A[3:2]
1836 if (auto extract = inputs[i].getDefiningOp<ExtractOp>()) {
1837 if (auto prevExtract = inputs[i - 1].getDefiningOp<ExtractOp>()) {
1838 if (extract.getInput() == prevExtract.getInput()) {
1839 auto thisWidth = cast<IntegerType>(extract.getType()).getWidth();
1840 if (prevExtract.getLowBit() == extract.getLowBit() + thisWidth) {
1841 auto prevWidth = prevExtract.getType().getIntOrFloatBitWidth();
1842 auto resType = rewriter.getIntegerType(thisWidth + prevWidth);
1843 Value replacement =
1844 ExtractOp::create(rewriter, op.getLoc(), resType,
1845 extract.getInput(), extract.getLowBit());
1846 return flattenConcat(i - 1, i, replacement);
1847 }
1848 }
1849 }
1850 }
1851 // Merge neighboring array extracts of neighboring inputs, e.g.
1852 // {Array[4], bitcast(Array[3:2])} -> bitcast(Array[4:2])
1853
1854 // This represents a slice of an array.
1855 struct ArraySlice {
1856 Value input;
1857 Value index;
1858 size_t width;
1859 static std::optional<ArraySlice> get(Value value) {
1860 assert(isa<IntegerType>(value.getType()) && "expected integer type");
1861 if (auto arrayGet = value.getDefiningOp<hw::ArrayGetOp>())
1862 return ArraySlice{arrayGet.getInput(), arrayGet.getIndex(), 1};
1863 // An array slice op is wrapped with a bitcast.
1864 if (auto bitcast = value.getDefiningOp<hw::BitcastOp>())
1865 if (auto arraySlice =
1866 bitcast.getInput().getDefiningOp<hw::ArraySliceOp>())
1867 return ArraySlice{
1868 arraySlice.getInput(), arraySlice.getLowIndex(),
1869 hw::type_cast<hw::ArrayType>(arraySlice.getType())
1870 .getNumElements()};
1871 return std::nullopt;
1872 }
1873 };
1874 if (auto extractOpt = ArraySlice::get(inputs[i])) {
1875 if (auto prevExtractOpt = ArraySlice::get(inputs[i - 1])) {
1876 // Check that the two array slices are mergeable.
1877 if (prevExtractOpt->index.getType() == extractOpt->index.getType() &&
1878 prevExtractOpt->input == extractOpt->input &&
1879 hw::isOffset(extractOpt->index, prevExtractOpt->index,
1880 extractOpt->width)) {
1881 auto resType = hw::ArrayType::get(
1882 hw::type_cast<hw::ArrayType>(prevExtractOpt->input.getType())
1883 .getElementType(),
1884 extractOpt->width + prevExtractOpt->width);
1885 auto resIntType = rewriter.getIntegerType(hw::getBitWidth(resType));
1886 Value replacement = hw::BitcastOp::create(
1887 rewriter, op.getLoc(), resIntType,
1888 hw::ArraySliceOp::create(rewriter, op.getLoc(), resType,
1889 prevExtractOpt->input,
1890 extractOpt->index));
1891 return flattenConcat(i - 1, i, replacement);
1892 }
1893 }
1894 }
1895 }
1896 }
1897
1898 // If all operands were the same, then this is a replicate.
1899 if (commonOperand) {
1900 replaceOpWithNewOpAndCopyNamehint<ReplicateOp>(rewriter, op, op.getType(),
1901 commonOperand);
1902 return success();
1903 }
1904
1905 return failure();
1906}
1907
1908//===----------------------------------------------------------------------===//
1909// MuxOp
1910//===----------------------------------------------------------------------===//
1911
1912OpFoldResult MuxOp::fold(FoldAdaptor adaptor) {
1913 if (isOpTriviallyRecursive(*this))
1914 return {};
1915
1916 // mux (c, b, b) -> b
1917 if (getTrueValue() == getFalseValue() && getTrueValue() != getResult())
1918 return getTrueValue();
1919 if (auto tv = adaptor.getTrueValue())
1920 if (tv == adaptor.getFalseValue())
1921 return tv;
1922
1923 // mux(0, a, b) -> b
1924 // mux(1, a, b) -> a
1925 if (auto pred = dyn_cast_or_null<IntegerAttr>(adaptor.getCond())) {
1926 if (pred.getValue().isZero() && getFalseValue() != getResult())
1927 return getFalseValue();
1928 if (pred.getValue().isOne() && getTrueValue() != getResult())
1929 return getTrueValue();
1930 }
1931
1932 // mux(cond, 1, 0) -> cond
1933 if (getCond().getType() == getTrueValue().getType())
1934 if (auto tv = dyn_cast_or_null<IntegerAttr>(adaptor.getTrueValue()))
1935 if (auto fv = dyn_cast_or_null<IntegerAttr>(adaptor.getFalseValue()))
1936 if (tv.getValue().isOne() && fv.getValue().isZero() &&
1937 hw::getBitWidth(getType()) == 1 && getCond() != getResult())
1938 return getCond();
1939
1940 return {};
1941}
1942
1943/// Check to see if the condition to the specified mux is an equality
1944 /// comparison between `indexValue` and one or more constants. If so, put the
1945/// constants in the constants vector and return true, otherwise return false.
1946///
1947/// This is part of foldMuxChain.
1948///
1949static bool
1950getMuxChainCondConstant(Value cond, Value indexValue, bool isInverted,
1951 std::function<void(hw::ConstantOp)> constantFn) {
1952 // Handle `idx == 42` and `idx != 42`.
1953 if (auto cmp = cond.getDefiningOp<ICmpOp>()) {
1954 // TODO: We could handle things like "x < 2" as two entries.
1955 auto requiredPredicate =
1956 (isInverted ? ICmpPredicate::eq : ICmpPredicate::ne);
1957 if (cmp.getLhs() == indexValue && cmp.getPredicate() == requiredPredicate) {
1958 if (auto cst = cmp.getRhs().getDefiningOp<hw::ConstantOp>()) {
1959 constantFn(cst);
1960 return true;
1961 }
1962 }
1963 return false;
1964 }
1965
1966 // Handle mux(`idx == 1 || idx == 3`, value, muxchain).
1967 if (auto orOp = cond.getDefiningOp<OrOp>()) {
1968 if (!isInverted)
1969 return false;
1970 for (auto operand : orOp.getOperands())
1971 if (!getMuxChainCondConstant(operand, indexValue, isInverted, constantFn))
1972 return false;
1973 return true;
1974 }
1975
1976 // Handle mux(`idx != 1 && idx != 3`, muxchain, value).
1977 if (auto andOp = cond.getDefiningOp<AndOp>()) {
1978 if (isInverted)
1979 return false;
1980 for (auto operand : andOp.getOperands())
1981 if (!getMuxChainCondConstant(operand, indexValue, isInverted, constantFn))
1982 return false;
1983 return true;
1984 }
1985
1986 return false;
1987}
1988
1989/// Given a mux, check to see if the "on true" value (or "on false" value if
1990 /// isFalseSide=true) is a mux tree comparing the same index value. This allows
1991 /// us to turn things like `mux(VAL == 0, A, mux(VAL == 1, B, C))` into
1992/// `array_get (array_create(A, B, C), VAL)` or a balanced mux tree which is far
1993/// more compact and allows synthesis tools to do more interesting
1994/// optimizations.
1995///
1996/// This returns false if we cannot form the mux tree (or do not want to) and
1997/// returns true if the mux was replaced.
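///
/// Schematically, for a 2-bit index a chain like
///   mux(idx == 0, A, mux(idx == 1, B, mux(idx == 2, C, D)))
/// can become
///   array_get(array_create(D, C, B, A), idx)
/// or a balanced tree of muxes over the bits of idx, with D filling every table
/// entry that has no explicit comparison.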
1998static bool foldMuxChainWithComparison(
1999 PatternRewriter &rewriter, MuxOp rootMux, bool isFalseSide,
2000 llvm::function_ref<MuxChainWithComparisonFoldingStyle(size_t indexWidth,
2001 size_t numEntries)>
2002 styleFn) {
2003 // Get the index value being compared. Later we check to see if it is
2004 // compared to a constant with the right predicate.
2005 auto rootCmp = rootMux.getCond().getDefiningOp<ICmpOp>();
2006 if (!rootCmp)
2007 return false;
2008 Value indexValue = rootCmp.getLhs();
2009
2010 // Return the value to use if the equality match succeeds.
2011 auto getCaseValue = [&](MuxOp mux) -> Value {
2012 return mux.getOperand(1 + unsigned(!isFalseSide));
2013 };
2014
2015 // Return the value to use if the equality match fails. This is the next
2016 // mux in the sequence or the "otherwise" value.
2017 auto getTreeValue = [&](MuxOp mux) -> Value {
2018 return mux.getOperand(1 + unsigned(isFalseSide));
2019 };
2020
2021 // Start scanning the mux tree to see what we've got. Keep track of the
2022 // constant comparison value and the SSA value to use when equal to it.
2023 SmallVector<Location> locationsFound;
2024 SmallVector<std::pair<hw::ConstantOp, Value>, 4> valuesFound;
2025
2026 /// Extract constants and values into `valuesFound` and return true if this is
2027 /// part of the mux tree, otherwise return false.
2028 auto collectConstantValues = [&](MuxOp mux) -> bool {
2029 return getMuxChainCondConstant(
2030 mux.getCond(), indexValue, isFalseSide, [&](hw::ConstantOp cst) {
2031 valuesFound.push_back({cst, getCaseValue(mux)});
2032 locationsFound.push_back(mux.getCond().getLoc());
2033 locationsFound.push_back(mux->getLoc());
2034 });
2035 };
2036
2037 // Make sure the root is a correct comparison with a constant.
2038 if (!collectConstantValues(rootMux))
2039 return false;
2040
2041 // Make sure that we're not looking at an intermediate node in a mux tree.
2042 if (rootMux->hasOneUse()) {
2043 if (auto userMux = dyn_cast<MuxOp>(*rootMux->user_begin())) {
2044 if (getTreeValue(userMux) == rootMux.getResult() &&
2045 getMuxChainCondConstant(userMux.getCond(), indexValue, isFalseSide,
2046 [&](hw::ConstantOp cst) {}))
2047 return false;
2048 }
2049 }
2050
2051 // Scan up the tree linearly.
2052 auto nextTreeValue = getTreeValue(rootMux);
2053 while (1) {
2054 auto nextMux = nextTreeValue.getDefiningOp<MuxOp>();
2055 if (!nextMux || !nextMux->hasOneUse())
2056 break;
2057 if (!collectConstantValues(nextMux))
2058 break;
2059 nextTreeValue = getTreeValue(nextMux);
2060 }
2061
2062 auto indexWidth = cast<IntegerType>(indexValue.getType()).getWidth();
2063
2064 if (indexWidth > 20)
2065 return false; // Too big to make a table.
2066
2067 auto foldingStyle = styleFn(indexWidth, valuesFound.size());
2068 if (foldingStyle == MuxChainWithComparisonFoldingStyle::None)
2069 return false;
2070
2071 uint64_t tableSize = 1ULL << indexWidth;
2072
2073 // Ok, we're going to do the transformation, start by building the table
2074 // filled with the "otherwise" value.
2075 SmallVector<Value, 8> table(tableSize, nextTreeValue);
2076
2077 // Fill in entries in the table from the leaf to the root of the expression.
2078 // This ensures that any duplicate matches end up with the ultimate value,
2079 // which is the one closer to the root.
2080 for (auto &elt : llvm::reverse(valuesFound)) {
2081 uint64_t idx = elt.first.getValue().getZExtValue();
2082 assert(idx < table.size() && "constant should be same bitwidth as index");
2083 table[idx] = elt.second;
2084 }
2085
2087 SmallVector<Value> bits;
2088 comb::extractBits(rewriter, indexValue, bits);
2089 auto result = constructMuxTree(rewriter, rootMux->getLoc(), bits, table,
2090 nextTreeValue);
2091 replaceOpAndCopyNamehint(rewriter, rootMux, result);
2092 return true;
2093 }
2094
2096 "unknown folding style");
2097
2098 // The hw.array_create operation has the operand list in unintuitive order
2099 // with a[0] stored as the last element, not the first.
2100 std::reverse(table.begin(), table.end());
2101
2102 // Build the array_create and the array_get.
2103 auto fusedLoc = rewriter.getFusedLoc(locationsFound);
2104 auto array = hw::ArrayCreateOp::create(rewriter, fusedLoc, table);
2105 replaceOpWithNewOpAndCopyNamehint<hw::ArrayGetOp>(rewriter, rootMux, array,
2106 indexValue);
2107 return true;
2108}
2109
2110/// Given a fully associative variadic operation like (a+b+c+d), break the
2111/// expression into two parts, one without the specified operand (e.g.
2112/// `tmp = a+b+d`) and one that combines that into the full expression (e.g.
2113/// `tmp+c`), and return the inner expression.
2114///
2115/// NOTE: This mutates the operation in place if it only has a single user,
2116/// which assumes that user will be removed.
2117///
2118static Value extractOperandFromFullyAssociative(Operation *fullyAssoc,
2119 size_t operandNo,
2120 PatternRewriter &rewriter) {
2121 assert(fullyAssoc->getNumOperands() >= 2 && "cannot split up unary ops");
2122 assert(operandNo < fullyAssoc->getNumOperands() && "Invalid operand #");
2123
2124 // If this expression already has two operands (the common case) no splitting
2125 // is necessary.
2126 if (fullyAssoc->getNumOperands() == 2)
2127 return fullyAssoc->getOperand(operandNo ^ 1);
2128
2129 // If the operation has a single use, mutate it in place.
2130 if (fullyAssoc->hasOneUse()) {
2131 rewriter.modifyOpInPlace(fullyAssoc,
2132 [&]() { fullyAssoc->eraseOperand(operandNo); });
2133 return fullyAssoc->getResult(0);
2134 }
2135
2136 // Form the new operation with the operands that remain.
2137 SmallVector<Value> operands;
2138 operands.append(fullyAssoc->getOperands().begin(),
2139 fullyAssoc->getOperands().begin() + operandNo);
2140 operands.append(fullyAssoc->getOperands().begin() + operandNo + 1,
2141 fullyAssoc->getOperands().end());
2142 Value opWithoutExcluded = createGenericOp(
2143 fullyAssoc->getLoc(), fullyAssoc->getName(), operands, rewriter);
2144 Value excluded = fullyAssoc->getOperand(operandNo);
2145
2146 Value fullResult =
2147 createGenericOp(fullyAssoc->getLoc(), fullyAssoc->getName(),
2148 ArrayRef<Value>{opWithoutExcluded, excluded}, rewriter);
2149 replaceOpAndCopyNamehint(rewriter, fullyAssoc, fullResult);
2150 return opWithoutExcluded;
2151}
2152
2153/// Fold things like `mux(cond, x|y|z|a, a)` -> `(x|y|z)&replicate(cond)|a` and
2154 /// `mux(cond, a, x|y|z|a)` -> `(x|y|z)&replicate(~cond) | a` (when isTrueOperand
2155 /// is true). Return true on successful transformation, false if not.
2156///
2157/// These are various forms of "predicated ops" that can be handled with a
2158/// replicate/and combination.
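///
/// For example, for `mux(cond, x|y|a, a)`: when cond is 1 the replicated mask
/// is all ones and the result is x|y|a; when cond is 0 the mask is zero and the
/// result is just a.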
2159static bool foldCommonMuxValue(MuxOp op, bool isTrueOperand,
2160 PatternRewriter &rewriter) {
2161 // Check to see if the operand in question is an operation. If it is a port,
2162 // we can't simplify it.
2163 Operation *subExpr =
2164 (isTrueOperand ? op.getFalseValue() : op.getTrueValue()).getDefiningOp();
2165 if (!subExpr || subExpr->getNumOperands() < 2)
2166 return false;
2167
2168 // If this isn't an operation we can handle, don't spend energy on it.
2169 if (!isa<AndOp, XorOp, OrOp, MuxOp>(subExpr))
2170 return false;
2171
2172 // Check to see if the common value occurs in the operand list for the
2173 // subexpression op. If so, then we can simplify it.
2174 Value commonValue = isTrueOperand ? op.getTrueValue() : op.getFalseValue();
2175 size_t opNo = 0, e = subExpr->getNumOperands();
2176 while (opNo != e && subExpr->getOperand(opNo) != commonValue)
2177 ++opNo;
2178 if (opNo == e)
2179 return false;
2180
2181 // If we got a hit, then go ahead and simplify it!
2182 Value cond = op.getCond();
2183
2184 // `mux(cond, a, mux(cond2, a, b))` -> `mux(cond|cond2, a, b)`
2185 // `mux(cond, a, mux(cond2, b, a))` -> `mux(cond|~cond2, a, b)`
2186 // `mux(cond, mux(cond2, a, b), a)` -> `mux(~cond|cond2, a, b)`
2187 // `mux(cond, mux(cond2, b, a), a)` -> `mux(~cond|~cond2, a, b)`
2188 if (auto subMux = dyn_cast<MuxOp>(subExpr)) {
2189 if (subMux == op)
2190 return false;
2191
2192 Value otherValue;
2193 Value subCond = subMux.getCond();
2194
2195 // Invert the subCond if needed and dig out the 'b' value.
2196 if (subMux.getTrueValue() == commonValue)
2197 otherValue = subMux.getFalseValue();
2198 else if (subMux.getFalseValue() == commonValue) {
2199 otherValue = subMux.getTrueValue();
2200 subCond = createOrFoldNot(op.getLoc(), subCond, rewriter);
2201 } else {
2202 // We can't fold `mux(cond, a, mux(a, x, y))`.
2203 return false;
2204 }
2205
2206 // Invert the outer cond if needed, and combine the mux conditions.
2207 if (!isTrueOperand)
2208 cond = createOrFoldNot(op.getLoc(), cond, rewriter);
2209 cond = rewriter.createOrFold<OrOp>(op.getLoc(), cond, subCond, false);
2210 replaceOpWithNewOpAndCopyNamehint<MuxOp>(rewriter, op, cond, commonValue,
2211 otherValue, op.getTwoState());
2212 return true;
2213 }
2214
2215 // Invert the condition if needed. Or/Xor invert when dealing with
2216 // TrueOperand, And inverts for False operand.
2217 bool isaAndOp = isa<AndOp>(subExpr);
2218 if (isTrueOperand ^ isaAndOp)
2219 cond = createOrFoldNot(op.getLoc(), cond, rewriter);
2220
2221 auto extendedCond =
2222 rewriter.createOrFold<ReplicateOp>(op.getLoc(), op.getType(), cond);
2223
2224 // Cache this information before subExpr is erased by extraction below.
2225 bool isaXorOp = isa<XorOp>(subExpr);
2226 bool isaOrOp = isa<OrOp>(subExpr);
2227
2228 // Handle the fully associative ops: start by pulling out the subexpression
2229 // from a many-operand version of the op.
2230 auto restOfAssoc =
2231 extractOperandFromFullyAssociative(subExpr, opNo, rewriter);
2232
2233 // `mux(cond, x|y|z|a, a)` -> `(x|y|z)&replicate(cond) | a`
2234 // `mux(cond, x^y^z^a, a)` -> `(x^y^z)&replicate(cond) ^ a`
2235 if (isaOrOp || isaXorOp) {
2236 auto masked = rewriter.createOrFold<AndOp>(op.getLoc(), extendedCond,
2237 restOfAssoc, false);
2238 if (isaXorOp)
2239 replaceOpWithNewOpAndCopyNamehint<XorOp>(rewriter, op, masked,
2240 commonValue, false);
2241 else
2242 replaceOpWithNewOpAndCopyNamehint<OrOp>(rewriter, op, masked, commonValue,
2243 false);
2244 return true;
2245 }
2246
2247 // `mux(cond, a, x&y&z&a)` -> `((x&y&z)|replicate(cond)) & a`
2248 assert(isaAndOp && "unexpected operation here");
2249 auto masked = rewriter.createOrFold<OrOp>(op.getLoc(), extendedCond,
2250 restOfAssoc, false);
2251 replaceOpWithNewOpAndCopyNamehint<AndOp>(rewriter, op, masked, commonValue,
2252 false);
2253 return true;
2254}
2255
2256 /// This function is invoked when we find a mux with true/false operations that
2257/// have the same opcode. Check to see if we can strength reduce the mux by
2258/// applying it to less data by applying this transformation:
2259/// `mux(cond, op(a, b), op(a, c))` -> `op(a, mux(cond, b, c))`
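///
/// For example:
///   mux(cond, concat(a, b), concat(a, c)) -> concat(a, mux(cond, b, c))
/// so the mux only has to select between the differing low bits.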
2260static bool foldCommonMuxOperation(MuxOp mux, Operation *trueOp,
2261 Operation *falseOp,
2262 PatternRewriter &rewriter) {
2263 // Right now we only apply to concat.
2264 // TODO: Generalize this to and, or, xor, icmp(!), which all occur in practice
2265 if (!isa<ConcatOp>(trueOp))
2266 return false;
2267
2268 // Decode the operands, looking through recursive concats and replicates.
2269 SmallVector<Value> trueOperands, falseOperands;
2270 getConcatOperands(trueOp->getResult(0), trueOperands);
2271 getConcatOperands(falseOp->getResult(0), falseOperands);
2272
2273 size_t numTrueOperands = trueOperands.size();
2274 size_t numFalseOperands = falseOperands.size();
2275
2276 if (!numTrueOperands || !numFalseOperands ||
2277 (trueOperands.front() != falseOperands.front() &&
2278 trueOperands.back() != falseOperands.back()))
2279 return false;
2280
2281 // Pull all leading shared operands out into their own op if any are common.
2282 if (trueOperands.front() == falseOperands.front()) {
2283 SmallVector<Value> operands;
2284 size_t i;
2285 for (i = 0; i < numTrueOperands; ++i) {
2286 Value trueOperand = trueOperands[i];
2287 if (trueOperand == falseOperands[i])
2288 operands.push_back(trueOperand);
2289 else
2290 break;
2291 }
2292 if (i == numTrueOperands) {
2293 // Selecting between distinct, but lexically identical, concats.
2294 replaceOpAndCopyNamehint(rewriter, mux, trueOp->getResult(0));
2295 return true;
2296 }
2297
2298 Value sharedMSB;
2299 if (llvm::all_of(operands, [&](Value v) { return v == operands.front(); }))
2300 sharedMSB = rewriter.createOrFold<ReplicateOp>(
2301 mux->getLoc(), operands.front(), operands.size());
2302 else
2303 sharedMSB = rewriter.createOrFold<ConcatOp>(mux->getLoc(), operands);
2304 operands.clear();
2305
2306 // Get a concat of the LSBs on each side.
2307 operands.append(trueOperands.begin() + i, trueOperands.end());
2308 Value trueLSB = rewriter.createOrFold<ConcatOp>(trueOp->getLoc(), operands);
2309 operands.clear();
2310 operands.append(falseOperands.begin() + i, falseOperands.end());
2311 Value falseLSB =
2312 rewriter.createOrFold<ConcatOp>(falseOp->getLoc(), operands);
2313 // Merge the LSBs with a new mux and concat the shared MSB with the merged
2314 // LSB to finish.
2315 Value lsb = rewriter.createOrFold<MuxOp>(
2316 mux->getLoc(), mux.getCond(), trueLSB, falseLSB, mux.getTwoState());
2317 replaceOpWithNewOpAndCopyNamehint<ConcatOp>(rewriter, mux, sharedMSB, lsb);
2318 return true;
2319 }
2320
2321 // If trailing operands match, try to commonize them.
2322 if (trueOperands.back() == falseOperands.back()) {
2323 SmallVector<Value> operands;
2324 size_t i;
2325 for (i = 0;; ++i) {
2326 Value trueOperand = trueOperands[numTrueOperands - i - 1];
2327 if (trueOperand == falseOperands[numFalseOperands - i - 1])
2328 operands.push_back(trueOperand);
2329 else
2330 break;
2331 }
2332 std::reverse(operands.begin(), operands.end());
2333 Value sharedLSB = rewriter.createOrFold<ConcatOp>(mux->getLoc(), operands);
2334 operands.clear();
2335
2336 // Get a concat of the MSBs on each side.
2337 operands.append(trueOperands.begin(), trueOperands.end() - i);
2338 Value trueMSB = rewriter.createOrFold<ConcatOp>(trueOp->getLoc(), operands);
2339 operands.clear();
2340 operands.append(falseOperands.begin(), falseOperands.end() - i);
2341 Value falseMSB =
2342 rewriter.createOrFold<ConcatOp>(falseOp->getLoc(), operands);
2343 // Merge the MSBs with a new mux and concat them with the shared LSB to finish.
2344 Value msb = rewriter.createOrFold<MuxOp>(
2345 mux->getLoc(), mux.getCond(), trueMSB, falseMSB, mux.getTwoState());
2346 replaceOpWithNewOpAndCopyNamehint<ConcatOp>(rewriter, mux, msb, sharedLSB);
2347 return true;
2348 }
2349
2350 return false;
2351}
2352
2353 // If both arguments of the mux are uniform arrays, sink the mux and build a
2354 // uniform array whose elements are a single mux of the two uniform elements.
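// For example:
//   mux(c, array_create(x, x, x), array_create(y, y, y))
//     -> array_create(m, m, m) where m = mux(c, x, y)
// so only a single element-wide mux is materialized.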
2355static bool foldMuxOfUniformArrays(MuxOp op, PatternRewriter &rewriter) {
2356 auto trueVec = op.getTrueValue().getDefiningOp<hw::ArrayCreateOp>();
2357 auto falseVec = op.getFalseValue().getDefiningOp<hw::ArrayCreateOp>();
2358 if (!trueVec || !falseVec)
2359 return false;
2360 if (!trueVec.isUniform() || !falseVec.isUniform())
2361 return false;
2362
2363 auto mux = MuxOp::create(rewriter, op.getLoc(), op.getCond(),
2364 trueVec.getUniformElement(),
2365 falseVec.getUniformElement(), op.getTwoState());
2366
2367 SmallVector<Value> values(trueVec.getInputs().size(), mux);
2368 rewriter.replaceOpWithNewOp<hw::ArrayCreateOp>(op, values);
2369 return true;
2370}
2371
2372 /// If the mux condition is also an operand of the op defining the mux's true
2373 /// or false value, replace that use of the condition with a constant 1 or 0.
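/// For example, in mux(c, xor(c, a), b) the true value is only observed when c
/// is 1, so its use of c can be rewritten to a constant 1.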
2374static bool assumeMuxCondInOperand(Value muxCond, Value muxValue,
2375 bool constCond, PatternRewriter &rewriter) {
2376 if (!muxValue.hasOneUse())
2377 return false;
2378 auto *op = muxValue.getDefiningOp();
2379 if (!op || !isa_and_nonnull<CombDialect>(op->getDialect()))
2380 return false;
2381 if (!llvm::is_contained(op->getOperands(), muxCond))
2382 return false;
2383 OpBuilder::InsertionGuard guard(rewriter);
2384 rewriter.setInsertionPoint(op);
2385 auto condValue =
2386 hw::ConstantOp::create(rewriter, muxCond.getLoc(), APInt(1, constCond));
2387 rewriter.modifyOpInPlace(op, [&] {
2388 for (auto &use : op->getOpOperands())
2389 if (use.get() == muxCond)
2390 use.set(condValue);
2391 });
2392 return true;
2393}
2394
2395namespace {
2396struct MuxRewriter : public mlir::OpRewritePattern<MuxOp> {
2397 using OpRewritePattern::OpRewritePattern;
2398
2399 LogicalResult matchAndRewrite(MuxOp op,
2400 PatternRewriter &rewriter) const override;
2401};
2402
2403static MuxChainWithComparisonFoldingStyle
2404foldToArrayCreateOnlyWhenDense(size_t indexWidth, size_t numEntries) {
2405 // If the index is 9 or more bits, the table would need at least 512 elements
2406 // and would be too large for a single expression.
2407 if (indexWidth >= 9 || numEntries < 3)
2408 return MuxChainWithComparisonFoldingStyle::None;
2409
2410 // Next we need to see if the values are dense-ish. We don't want to have
2411 // a tremendous number of replicated entries in the array. Some sparsity is
2412 // ok though, so we require the table to be at least 5/8 utilized.
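// For example, with a 4-bit index the table has 16 slots, so at least 10 of
// them (5/8 of 16) must come from explicit comparisons.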
2413 uint64_t tableSize = 1ULL << indexWidth;
2414 if (numEntries >= tableSize * 5 / 8)
2417}
2418
2419LogicalResult MuxRewriter::matchAndRewrite(MuxOp op,
2420 PatternRewriter &rewriter) const {
2421 if (isOpTriviallyRecursive(op))
2422 return failure();
2423
2424 bool isSignlessInt = false;
2425 if (auto intType = dyn_cast<IntegerType>(op.getType()))
2426 isSignlessInt = intType.isSignless();
2427
2428 // If the op has an SV attribute, don't optimize it.
2429 if (hasSVAttributes(op))
2430 return failure();
2431 APInt value;
2432
2433 if (matchPattern(op.getTrueValue(), m_ConstantInt(&value)) && isSignlessInt) {
2434 if (value.getBitWidth() == 1) {
2435 // mux(a, 0, b) -> and(~a, b) for single-bit values.
2436 if (value.isZero()) {
2437 auto notCond = createOrFoldNot(op.getLoc(), op.getCond(), rewriter);
2438 replaceOpWithNewOpAndCopyNamehint<AndOp>(rewriter, op, notCond,
2439 op.getFalseValue(), false);
2440 return success();
2441 }
2442
2443 // mux(a, 1, b) -> or(a, b) for single-bit values.
2444 replaceOpWithNewOpAndCopyNamehint<OrOp>(rewriter, op, op.getCond(),
2445 op.getFalseValue(), false);
2446 return success();
2447 }
2448
2449 // Check for mux of two constants. There are many ways to simplify them.
2450 APInt value2;
2451 if (matchPattern(op.getFalseValue(), m_ConstantInt(&value2))) {
2452 // When both inputs are constants and differ by only one bit, we can
2453 // simplify by splitting the mux into up to three contiguous chunks: one
2454 // for the differing bit and up to two for the bits that are the same.
2455 // E.g. mux(a, 3'h2, 0) -> concat(0, mux(a, 1, 0), 0) -> concat(0, a, 0)
2456 APInt xorValue = value ^ value2;
2457 if (xorValue.isPowerOf2()) {
2458 unsigned leadingZeros = xorValue.countLeadingZeros();
2459 unsigned trailingZeros = value.getBitWidth() - leadingZeros - 1;
2460 SmallVector<Value, 3> operands;
2461
2462 // Concat operands go from MSB to LSB, so we handle chunks in reverse
2463 // order of bit indexes.
2464 // For the chunks that are identical (i.e. correspond to 0s in
2465 // xorValue), we can extract directly from either input value, and we
2466 // arbitrarily pick the trueValue().
2467
2468 if (leadingZeros > 0)
2469 operands.push_back(rewriter.createOrFold<ExtractOp>(
2470 op.getLoc(), op.getTrueValue(), trailingZeros + 1, leadingZeros));
2471
2472 // Handle the differing bit, which should simplify into either cond or
2473 // ~cond.
2474 auto v1 = rewriter.createOrFold<ExtractOp>(
2475 op.getLoc(), op.getTrueValue(), trailingZeros, 1);
2476 auto v2 = rewriter.createOrFold<ExtractOp>(
2477 op.getLoc(), op.getFalseValue(), trailingZeros, 1);
2478 operands.push_back(rewriter.createOrFold<MuxOp>(
2479 op.getLoc(), op.getCond(), v1, v2, false));
2480
2481 if (trailingZeros > 0)
2482 operands.push_back(rewriter.createOrFold<ExtractOp>(
2483 op.getLoc(), op.getTrueValue(), 0, trailingZeros));
2484
2485 replaceOpWithNewOpAndCopyNamehint<ConcatOp>(rewriter, op, op.getType(),
2486 operands);
2487 return success();
2488 }
2489
2490 // If the true value is all ones and the false is all zeros then we have a
2491 // replicate pattern.
2492 if (value.isAllOnes() && value2.isZero()) {
2493 replaceOpWithNewOpAndCopyNamehint<ReplicateOp>(
2494 rewriter, op, op.getType(), op.getCond());
2495 return success();
2496 }
2497 }
2498 }
2499
2500 if (matchPattern(op.getFalseValue(), m_ConstantInt(&value)) &&
2501 isSignlessInt && value.getBitWidth() == 1) {
2502 // mux(a, b, 0) -> and(a, b) for single-bit values.
2503 if (value.isZero()) {
2504 replaceOpWithNewOpAndCopyNamehint<AndOp>(rewriter, op, op.getCond(),
2505 op.getTrueValue(), false);
2506 return success();
2507 }
2508
2509 // mux(a, b, 1) -> or(~a, b) for single-bit values.
2510 // falseValue() is known to be a single-bit 1, which we can use for
2511 // the 1 in the representation of ~ using xor.
2512 auto notCond = rewriter.createOrFold<XorOp>(op.getLoc(), op.getCond(),
2513 op.getFalseValue(), false);
2514 replaceOpWithNewOpAndCopyNamehint<OrOp>(rewriter, op, notCond,
2515 op.getTrueValue(), false);
2516 return success();
2517 }
2518
2519 // mux(!a, b, c) -> mux(a, c, b)
2520 Value subExpr;
2521 Operation *condOp = op.getCond().getDefiningOp();
2522 if (condOp && matchPattern(condOp, m_Complement(m_Any(&subExpr))) &&
2523 op.getTwoState()) {
2524 replaceOpWithNewOpAndCopyNamehint<MuxOp>(rewriter, op, op.getType(),
2525 subExpr, op.getFalseValue(),
2526 op.getTrueValue(), true);
2527 return success();
2528 }
2529
2530 // Same but with De Morgan's law.
2531 // mux(and(~a, ~b, ~c), x, y) -> mux(or(a, b, c), y, x)
2532 // mux(or(~a, ~b, ~c), x, y) -> mux(and(a, b, c), y, x)
2533 if (condOp && condOp->hasOneUse()) {
2534 SmallVector<Value> invertedOperands;
2535
2536 /// Scan all the operands to see if they are complemented. If so, build a
2537 /// vector of them and return true, otherwise return false.
2538 auto getInvertedOperands = [&]() -> bool {
2539 for (Value operand : condOp->getOperands()) {
2540 if (matchPattern(operand, m_Complement(m_Any(&subExpr))))
2541 invertedOperands.push_back(subExpr);
2542 else
2543 return false;
2544 }
2545 return true;
2546 };
2547
2548 if (isa<AndOp>(condOp) && getInvertedOperands()) {
2549 auto newOr =
2550 rewriter.createOrFold<OrOp>(op.getLoc(), invertedOperands, false);
2551 replaceOpWithNewOpAndCopyNamehint<MuxOp>(
2552 rewriter, op, newOr, op.getFalseValue(), op.getTrueValue(),
2553 op.getTwoState());
2554 return success();
2555 }
2556 if (isa<OrOp>(condOp) && getInvertedOperands()) {
2557 auto newAnd =
2558 rewriter.createOrFold<AndOp>(op.getLoc(), invertedOperands, false);
2559 replaceOpWithNewOpAndCopyNamehint<MuxOp>(
2560 rewriter, op, newAnd, op.getFalseValue(), op.getTrueValue(),
2561 op.getTwoState());
2562 return success();
2563 }
2564 }
2565
2566 if (auto falseMux = op.getFalseValue().getDefiningOp<MuxOp>();
2567 falseMux && falseMux != op) {
2568 // mux(selector, x, mux(selector, y, z)) -> mux(selector, x, z)
2569 if (op.getCond() == falseMux.getCond() &&
2570 falseMux.getFalseValue() != falseMux) {
2571 replaceOpWithNewOpAndCopyNamehint<MuxOp>(
2572 rewriter, op, op.getCond(), op.getTrueValue(),
2573 falseMux.getFalseValue(), op.getTwoStateAttr());
2574 return success();
2575 }
2576
2577 // Check to see if we can fold a mux tree into an array_create/get pair.
2578 if (foldMuxChainWithComparison(rewriter, op, /*isFalseSide*/ true,
2579 foldToArrayCreateOnlyWhenDense))
2580 return success();
2581 }
2582
2583 if (auto trueMux = op.getTrueValue().getDefiningOp<MuxOp>();
2584 trueMux && trueMux != op) {
2585 // mux(selector, mux(selector, a, b), c) -> mux(selector, a, c)
2586 if (op.getCond() == trueMux.getCond()) {
2587 replaceOpWithNewOpAndCopyNamehint<MuxOp>(
2588 rewriter, op, op.getCond(), trueMux.getTrueValue(),
2589 op.getFalseValue(), op.getTwoStateAttr());
2590 return success();
2591 }
2592
2593 // Check to see if we can fold a mux tree into an array_create/get pair.
2594 if (foldMuxChainWithComparison(rewriter, op, /*isFalseSide*/ false,
2595 foldToArrayCreateOnlyWhenDense))
2596 return success();
2597 }
2598
2599 // mux(c1, mux(c2, a, b), mux(c2, a, c)) -> mux(c2, a, mux(c1, b, c))
2600 if (auto trueMux = dyn_cast_or_null<MuxOp>(op.getTrueValue().getDefiningOp()),
2601 falseMux = dyn_cast_or_null<MuxOp>(op.getFalseValue().getDefiningOp());
2602 trueMux && falseMux && trueMux.getCond() == falseMux.getCond() &&
2603 trueMux.getTrueValue() == falseMux.getTrueValue() && trueMux != op &&
2604 falseMux != op) {
2605 auto subMux = MuxOp::create(
2606 rewriter, rewriter.getFusedLoc({trueMux.getLoc(), falseMux.getLoc()}),
2607 op.getCond(), trueMux.getFalseValue(), falseMux.getFalseValue());
2608 replaceOpWithNewOpAndCopyNamehint<MuxOp>(rewriter, op, trueMux.getCond(),
2609 trueMux.getTrueValue(), subMux,
2610 op.getTwoStateAttr());
2611 return success();
2612 }
2613
2614 // mux(c1, mux(c2, a, b), mux(c2, c, b)) -> mux(c2, mux(c1, a, c), b)
2615 if (auto trueMux = dyn_cast_or_null<MuxOp>(op.getTrueValue().getDefiningOp()),
2616 falseMux = dyn_cast_or_null<MuxOp>(op.getFalseValue().getDefiningOp());
2617 trueMux && falseMux && trueMux.getCond() == falseMux.getCond() &&
2618 trueMux.getFalseValue() == falseMux.getFalseValue() && trueMux != op &&
2619 falseMux != op) {
2620 auto subMux = MuxOp::create(
2621 rewriter, rewriter.getFusedLoc({trueMux.getLoc(), falseMux.getLoc()}),
2622 op.getCond(), trueMux.getTrueValue(), falseMux.getTrueValue());
2623 replaceOpWithNewOpAndCopyNamehint<MuxOp>(rewriter, op, trueMux.getCond(),
2624 subMux, trueMux.getFalseValue(),
2625 op.getTwoStateAttr());
2626 return success();
2627 }
2628
2629 // mux(c1, mux(c2, a, b), mux(c3, a, b)) -> mux(mux(c1, c2, c3), a, b)
2630 if (auto trueMux = dyn_cast_or_null<MuxOp>(op.getTrueValue().getDefiningOp()),
2631 falseMux = dyn_cast_or_null<MuxOp>(op.getFalseValue().getDefiningOp());
2632 trueMux && falseMux &&
2633 trueMux.getTrueValue() == falseMux.getTrueValue() &&
2634 trueMux.getFalseValue() == falseMux.getFalseValue() && trueMux != op &&
2635 falseMux != op) {
2636 auto subMux =
2637 MuxOp::create(rewriter,
2638 rewriter.getFusedLoc(
2639 {op.getLoc(), trueMux.getLoc(), falseMux.getLoc()}),
2640 op.getCond(), trueMux.getCond(), falseMux.getCond());
2641 replaceOpWithNewOpAndCopyNamehint<MuxOp>(
2642 rewriter, op, subMux, trueMux.getTrueValue(), trueMux.getFalseValue(),
2643 op.getTwoStateAttr());
2644 return success();
2645 }
2646
2647 // mux(cond, x|y|z|a, a) -> (x|y|z)&replicate(cond) | a
2648 if (foldCommonMuxValue(op, false, rewriter))
2649 return success();
2650 // mux(cond, a, x|y|z|a) -> (x|y|z)&replicate(~cond) | a
2651 if (foldCommonMuxValue(op, true, rewriter))
2652 return success();
2653
2654 // `mux(cond, op(a, b), op(a, c))` -> `op(a, mux(cond, b, c))`
2655 if (Operation *trueOp = op.getTrueValue().getDefiningOp())
2656 if (Operation *falseOp = op.getFalseValue().getDefiningOp())
2657 if (trueOp->getName() == falseOp->getName())
2658 if (foldCommonMuxOperation(op, trueOp, falseOp, rewriter))
2659 return success();
2660
2661 // extracts only of mux(...) -> mux(extract()...)
2662 if (narrowOperationWidth(op, true, rewriter))
2663 return success();
2664
2665 // mux(cond, repl(n, a1), repl(n, a2)) -> repl(n, mux(cond, a1, a2))
2666 if (foldMuxOfUniformArrays(op, rewriter))
2667 return success();
2668
2669 // mux(cond, opA(cond), opB(cond)) -> mux(cond, opA(1), opB(0))
2670 if (op.getTrueValue().getDefiningOp() &&
2671 op.getTrueValue().getDefiningOp() != op)
2672 if (assumeMuxCondInOperand(op.getCond(), op.getTrueValue(), true, rewriter))
2673 return success();
2674 if (op.getFalseValue().getDefiningOp() &&
2675 op.getFalseValue().getDefiningOp() != op)
2676
2677 if (assumeMuxCondInOperand(op.getCond(), op.getFalseValue(), false,
2678 rewriter))
2679 return success();
2680
2681 return failure();
2682}
2683
2684static bool foldArrayOfMuxes(hw::ArrayCreateOp op, PatternRewriter &rewriter) {
2685 // Do not fold uniform or singleton arrays to avoid duplicating muxes.
2686 if (op.getInputs().empty() || op.isUniform())
2687 return false;
2688 auto inputs = op.getInputs();
2689 if (inputs.size() <= 1)
2690 return false;
2691
2692 // Check the operands to the array create. Ensure all of them are the
2693 // same op with the same number of operands.
2694 auto first = inputs[0].getDefiningOp<comb::MuxOp>();
2695 if (!first || hasSVAttributes(first))
2696 return false;
2697
2698 // Check whether all operands are muxes with the same condition.
2699 for (size_t i = 1, n = inputs.size(); i < n; ++i) {
2700 auto input = inputs[i].getDefiningOp<comb::MuxOp>();
2701 if (!input || first.getCond() != input.getCond())
2702 return false;
2703 }
2704
2705 // Collect the true and the false branches into arrays.
2706 SmallVector<Value> trues{first.getTrueValue()};
2707 SmallVector<Value> falses{first.getFalseValue()};
2708 SmallVector<Location> locs{first->getLoc()};
2709 bool isTwoState = true;
2710 for (size_t i = 1, n = inputs.size(); i < n; ++i) {
2711 auto input = inputs[i].getDefiningOp<comb::MuxOp>();
2712 trues.push_back(input.getTrueValue());
2713 falses.push_back(input.getFalseValue());
2714 locs.push_back(input->getLoc());
2715 if (!input.getTwoState())
2716 isTwoState = false;
2717 }
2718
2719 // Define the location of the array create as the aggregate of all muxes.
2720 auto loc = FusedLoc::get(op.getContext(), locs);
2721
2722 // Replace the array_create of muxes with a single mux whose operands are two
2723 // new array_creates, pushing the aggregation into the mux operands.
2724 auto arrayTy = op.getType();
2725 auto trueValues = hw::ArrayCreateOp::create(rewriter, loc, arrayTy, trues);
2726 auto falseValues = hw::ArrayCreateOp::create(rewriter, loc, arrayTy, falses);
2727 rewriter.replaceOpWithNewOp<comb::MuxOp>(op, arrayTy, first.getCond(),
2728 trueValues, falseValues, isTwoState);
2729 return true;
2730}
2731
2732struct ArrayRewriter : public mlir::OpRewritePattern<hw::ArrayCreateOp> {
2733 using OpRewritePattern::OpRewritePattern;
2734
2735 LogicalResult matchAndRewrite(hw::ArrayCreateOp op,
2736 PatternRewriter &rewriter) const override {
2737 if (foldArrayOfMuxes(op, rewriter))
2738 return success();
2739 return failure();
2740 }
2741};
2742
2743} // namespace
2744
2745void MuxOp::getCanonicalizationPatterns(RewritePatternSet &results,
2746 MLIRContext *context) {
2747 results.insert<MuxRewriter, ArrayRewriter>(context);
2748}
2749
2750//===----------------------------------------------------------------------===//
2751// ICmpOp
2752//===----------------------------------------------------------------------===//
2753
2754// Calculate the result of a comparison when the LHS and RHS are both
2755// constants.
2756static bool applyCmpPredicate(ICmpPredicate predicate, const APInt &lhs,
2757 const APInt &rhs) {
2758 switch (predicate) {
2759 case ICmpPredicate::eq:
2760 return lhs.eq(rhs);
2761 case ICmpPredicate::ne:
2762 return lhs.ne(rhs);
2763 case ICmpPredicate::slt:
2764 return lhs.slt(rhs);
2765 case ICmpPredicate::sle:
2766 return lhs.sle(rhs);
2767 case ICmpPredicate::sgt:
2768 return lhs.sgt(rhs);
2769 case ICmpPredicate::sge:
2770 return lhs.sge(rhs);
2771 case ICmpPredicate::ult:
2772 return lhs.ult(rhs);
2773 case ICmpPredicate::ule:
2774 return lhs.ule(rhs);
2775 case ICmpPredicate::ugt:
2776 return lhs.ugt(rhs);
2777 case ICmpPredicate::uge:
2778 return lhs.uge(rhs);
2779 case ICmpPredicate::ceq:
2780 return lhs.eq(rhs);
2781 case ICmpPredicate::cne:
2782 return lhs.ne(rhs);
2783 case ICmpPredicate::weq:
2784 return lhs.eq(rhs);
2785 case ICmpPredicate::wne:
2786 return lhs.ne(rhs);
2787 }
2788 llvm_unreachable("unknown comparison predicate");
2789}
2790
2791// Returns the result of applying the predicate when the LHS and RHS are the
2792// exact same value.
2793static bool applyCmpPredicateToEqualOperands(ICmpPredicate predicate) {
2794 switch (predicate) {
2795 case ICmpPredicate::eq:
2796 case ICmpPredicate::sle:
2797 case ICmpPredicate::sge:
2798 case ICmpPredicate::ule:
2799 case ICmpPredicate::uge:
2800 case ICmpPredicate::ceq:
2801 case ICmpPredicate::weq:
2802 return true;
2803 case ICmpPredicate::ne:
2804 case ICmpPredicate::slt:
2805 case ICmpPredicate::sgt:
2806 case ICmpPredicate::ult:
2807 case ICmpPredicate::ugt:
2808 case ICmpPredicate::cne:
2809 case ICmpPredicate::wne:
2810 return false;
2811 }
2812 llvm_unreachable("unknown comparison predicate");
2813}
2814
2815OpFoldResult ICmpOp::fold(FoldAdaptor adaptor) {
2816 // gt a, a -> false
2817 // gte a, a -> true
2818 if (getLhs() == getRhs()) {
2819 auto val = applyCmpPredicateToEqualOperands(getPredicate());
2820 return IntegerAttr::get(getType(), val);
2821 }
2822
2823 // gt 1, 2 -> false
2824 if (auto lhs = dyn_cast_or_null<IntegerAttr>(adaptor.getLhs())) {
2825 if (auto rhs = dyn_cast_or_null<IntegerAttr>(adaptor.getRhs())) {
2826 auto val =
2827 applyCmpPredicate(getPredicate(), lhs.getValue(), rhs.getValue());
2828 return IntegerAttr::get(getType(), val);
2829 }
2830 }
2831 return {};
2832}
2833
2834 // Given two ranges of operands, computes the number of matching prefix
2835 // elements (no cross-element matching); reversed ranges give the suffix.
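// For example, {a, b, c, d} vs. {a, b, x, d} has a common prefix of length 2;
// calling this on the reversed ranges gives the common suffix length, 1.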
2836template <typename Range>
2837static size_t computeCommonPrefixLength(const Range &a, const Range &b) {
2838 size_t commonPrefixLength = 0;
2839 auto ia = a.begin();
2840 auto ib = b.begin();
2841
2842 for (; ia != a.end() && ib != b.end(); ia++, ib++, commonPrefixLength++) {
2843 if (*ia != *ib) {
2844 break;
2845 }
2846 }
2847
2848 return commonPrefixLength;
2849}
2850
2851static size_t getTotalWidth(ArrayRef<Value> operands) {
2852 size_t totalWidth = 0;
2853 for (auto operand : operands) {
2854 // getIntOrFloatBitWidth should never raise, since all arguments to
2855 // ConcatOp are integers.
2856 ssize_t width = operand.getType().getIntOrFloatBitWidth();
2857 assert(width >= 0);
2858 totalWidth += width;
2859 }
2860 return totalWidth;
2861}
2862
2863 /// Reduce the strength of icmp(concat(...), concat(...)) by doing an
2864 /// element-wise comparison on the common prefix and suffix. Returns success()
2865 /// if a rewrite happens. This handles both concat and replicate.
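///
/// For example (unsigned):
///   icmp ult(concat(a, x, b), concat(a, y, b)) -> icmp ult(x, y)
/// For signed predicates the sign bit of the shared leading operand is kept:
///   icmp slt(concat(a, x), concat(a, y))
///     -> icmp slt(concat(sgn(a), x), concat(sgn(a), y))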
2866static LogicalResult matchAndRewriteCompareConcat(ICmpOp op, Operation *lhs,
2867 Operation *rhs,
2868 PatternRewriter &rewriter) {
2869 // It is safe to assume that [{lhsOperands, rhsOperands}.size() > 0] and
2870 // all elements have non-zero length. Both these invariants are verified
2871 // by the ConcatOp verifier.
2872 SmallVector<Value> lhsOperands, rhsOperands;
2873 getConcatOperands(lhs->getResult(0), lhsOperands);
2874 getConcatOperands(rhs->getResult(0), rhsOperands);
2875 ArrayRef<Value> lhsOperandsRef = lhsOperands, rhsOperandsRef = rhsOperands;
2876
2877 auto formCatOrReplicate = [&](Location loc,
2878 ArrayRef<Value> operands) -> Value {
2879 assert(!operands.empty());
2880 Value sameElement = operands[0];
2881 for (size_t i = 1, e = operands.size(); i != e && sameElement; ++i)
2882 if (sameElement != operands[i])
2883 sameElement = Value();
2884 if (sameElement)
2885 return rewriter.createOrFold<ReplicateOp>(loc, sameElement,
2886 operands.size());
2887 return rewriter.createOrFold<ConcatOp>(loc, operands);
2888 };
2889
2890 auto replaceWith = [&](ICmpPredicate predicate, Value lhs,
2891 Value rhs) -> LogicalResult {
2892 replaceOpWithNewOpAndCopyNamehint<ICmpOp>(rewriter, op, predicate, lhs, rhs,
2893 op.getTwoState());
2894 return success();
2895 };
2896
2897 size_t commonPrefixLength =
2898 computeCommonPrefixLength(lhsOperands, rhsOperands);
2899 if (commonPrefixLength == lhsOperands.size()) {
2900 // cat(a, b, c) == cat(a, b, c) -> 1
2901 bool result = applyCmpPredicateToEqualOperands(op.getPredicate());
2902 replaceOpWithNewOpAndCopyNamehint<hw::ConstantOp>(rewriter, op,
2903 APInt(1, result));
2904 return success();
2905 }
2906
2907 size_t commonSuffixLength = computeCommonPrefixLength(
2908 llvm::reverse(lhsOperandsRef), llvm::reverse(rhsOperandsRef));
2909
2910 size_t commonPrefixTotalWidth =
2911 getTotalWidth(lhsOperandsRef.take_front(commonPrefixLength));
2912 size_t commonSuffixTotalWidth =
2913 getTotalWidth(lhsOperandsRef.take_back(commonSuffixLength));
2914 auto lhsOnly = lhsOperandsRef.drop_front(commonPrefixLength)
2915 .drop_back(commonSuffixLength);
2916 auto rhsOnly = rhsOperandsRef.drop_front(commonPrefixLength)
2917 .drop_back(commonSuffixLength);
2918
2919 auto replaceWithoutReplicatingSignBit = [&]() {
2920 auto newLhs = formCatOrReplicate(lhs->getLoc(), lhsOnly);
2921 auto newRhs = formCatOrReplicate(rhs->getLoc(), rhsOnly);
2922 return replaceWith(op.getPredicate(), newLhs, newRhs);
2923 };
2924
2925 auto replaceWithReplicatingSignBit = [&]() {
2926 auto firstNonEmptyValue = lhsOperands[0];
2927 auto firstNonEmptyElemWidth =
2928 firstNonEmptyValue.getType().getIntOrFloatBitWidth();
2929 Value signBit = rewriter.createOrFold<ExtractOp>(
2930 op.getLoc(), firstNonEmptyValue, firstNonEmptyElemWidth - 1, 1);
2931
2932 auto newLhs = ConcatOp::create(rewriter, lhs->getLoc(), signBit, lhsOnly);
2933 auto newRhs = ConcatOp::create(rewriter, rhs->getLoc(), signBit, rhsOnly);
2934 return replaceWith(op.getPredicate(), newLhs, newRhs);
2935 };
2936
2937 if (ICmpOp::isPredicateSigned(op.getPredicate())) {
2938 // scmp(cat(..x, b), cat(..y, b)) == scmp(cat(..x), cat(..y))
2939 if (commonPrefixTotalWidth == 0 && commonSuffixTotalWidth > 0)
2940 return replaceWithoutReplicatingSignBit();
2941
2942 // scmp(cat(a, ..x, b), cat(a, ..y, b)) == scmp(cat(sgn(a), ..x),
2943 // cat(sgn(a), ..y)). Note that we cannot perform this optimization if
2944 // [width(b) == 0 && width(a) <= 1], since that common prefix is the sign
2945 // bit. Doing the rewrite can result in an infinite loop.
2946 if (commonPrefixTotalWidth > 1 || commonSuffixTotalWidth > 0)
2947 return replaceWithReplicatingSignBit();
2948
2949 } else if (commonPrefixTotalWidth > 0 || commonSuffixTotalWidth > 0) {
2950 // ucmp(cat(a, ..x, b), cat(a, ..y, b)) = ucmp(cat(..x), cat(..y))
2951 return replaceWithoutReplicatingSignBit();
2952 }
2953
2954 return failure();
2955}
2956
2957/// Given an equality comparison with a constant value and some operand that has
2958/// known bits, simplify the comparison to check only the unknown bits of the
2959/// input.
2960///
2961/// One simple example of this is that `concat(0, stuff) == 0` can be simplified
2962/// to `stuff == 0`, or `and(x, 3) == 0` can be simplified to
2963/// `extract x[1:0] == 0`
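///
/// More generally, if bits 3 and 1 of a 4-bit `x` are known to be 1 and 0 and
/// we compare `x == 0b1001`, the known bits already agree with the constant, so
/// the comparison becomes `concat(x[2], x[0]) == 2'b01`, checking only the
/// unknown bits.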
2964static void combineEqualityICmpWithKnownBitsAndConstant(
2965 ICmpOp cmpOp, const KnownBits &bitAnalysis, const APInt &rhsCst,
2966 PatternRewriter &rewriter) {
2967
2968 // If any of the known bits disagree with any of the comparison bits, then
2969 // we can constant fold this comparison right away.
2970 APInt bitsKnown = bitAnalysis.Zero | bitAnalysis.One;
2971 if ((bitsKnown & rhsCst) != bitAnalysis.One) {
2972 // If we discover a mismatch then we know an "eq" comparison is false
2973 // and a "ne" comparison is true!
2974 bool result = cmpOp.getPredicate() == ICmpPredicate::ne;
2975 replaceOpWithNewOpAndCopyNamehint<hw::ConstantOp>(rewriter, cmpOp,
2976 APInt(1, result));
2977 return;
2978 }
2979
2980 // Check to see if we can prove the result entirely of the comparison (in
2981 // which we bail out early), otherwise build a list of values to concat and a
2982 // smaller constant to compare against.
2983 SmallVector<Value> newConcatOperands;
2984 auto newConstant = APInt::getZeroWidth();
2985
2986 // Ok, some (maybe all) bits are known and some others may be unknown.
2987 // Extract out segments of the operand and compare against the
2988 // corresponding bits.
2989 unsigned knownMSB = bitsKnown.countLeadingOnes();
2990
2991 Value operand = cmpOp.getLhs();
2992
2993 // Ok, some bits are known but others are not. Extract out sequences of
2994 // bits that are unknown and compare just those bits. We work from MSB to
2995 // LSB.
2996 while (knownMSB != bitsKnown.getBitWidth()) {
2997 // Drop any high bits that are known.
2998 if (knownMSB)
2999 bitsKnown = bitsKnown.trunc(bitsKnown.getBitWidth() - knownMSB);
3000
3001 // Find the span of unknown bits, and extract it.
3002 unsigned unknownBits = bitsKnown.countLeadingZeros();
3003 unsigned lowBit = bitsKnown.getBitWidth() - unknownBits;
3004 auto spanOperand = rewriter.createOrFold<ExtractOp>(
3005 operand.getLoc(), operand, /*lowBit=*/lowBit,
3006 /*bitWidth=*/unknownBits);
3007 auto spanConstant = rhsCst.lshr(lowBit).trunc(unknownBits);
3008
3009 // Add this info to the concat we're generating.
3010 newConcatOperands.push_back(spanOperand);
3011 // FIXME(llvm merge, cc697fc292b0): concat doesn't work with zero bit values
3012 // newConstant = newConstant.concat(spanConstant);
3013 if (newConstant.getBitWidth() != 0)
3014 newConstant = newConstant.concat(spanConstant);
3015 else
3016 newConstant = spanConstant;
3017
3018 // Drop the unknown bits in prep for the next chunk.
3019 unsigned newWidth = bitsKnown.getBitWidth() - unknownBits;
3020 bitsKnown = bitsKnown.trunc(newWidth);
3021 knownMSB = bitsKnown.countLeadingOnes();
3022 }
3023
3024 // If no unknown spans remain, then every bit was known and already shown to
3025 // agree with the constant, so the comparison folds directly to a constant
3026 // result.
3027 if (newConcatOperands.empty()) {
3028 bool result = cmpOp.getPredicate() == ICmpPredicate::eq;
3029 replaceOpWithNewOpAndCopyNamehint<hw::ConstantOp>(rewriter, cmpOp,
3030 APInt(1, result));
3031 return;
3032 }
3033
3034 // If we have a single operand remaining, use it, otherwise form a concat.
3035 Value concatResult =
3036 rewriter.createOrFold<ConcatOp>(operand.getLoc(), newConcatOperands);
3037
3038 // Form the comparison against the smaller constant.
3039 auto newConstantOp = hw::ConstantOp::create(
3040 rewriter, cmpOp.getOperand(1).getLoc(), newConstant);
3041
3042 replaceOpWithNewOpAndCopyNamehint<ICmpOp>(rewriter, cmpOp,
3043 cmpOp.getPredicate(), concatResult,
3044 newConstantOp, cmpOp.getTwoState());
3045}
3046
3047// Simplify icmp eq(xor(a,b,cst1), cst2) -> icmp eq(xor(a,b), cst1^cst2).
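// E.g. icmp eq(xor(a, b, 0b0101), 0b0011) -> icmp eq(xor(a, b), 0b0110).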
3048static void combineEqualityICmpWithXorOfConstant(ICmpOp cmpOp, XorOp xorOp,
3049 const APInt &rhs,
3050 PatternRewriter &rewriter) {
3051 auto ip = rewriter.saveInsertionPoint();
3052 rewriter.setInsertionPoint(xorOp);
3053
3054 auto xorRHS = xorOp.getOperands().back().getDefiningOp<hw::ConstantOp>();
3055 auto newRHS = hw::ConstantOp::create(rewriter, xorRHS->getLoc(),
3056 xorRHS.getValue() ^ rhs);
3057 Value newLHS;
3058 switch (xorOp.getNumOperands()) {
3059 case 1:
3060 // This isn't common but is defined so we need to handle it.
3061 newLHS = hw::ConstantOp::create(rewriter, xorOp.getLoc(),
3062 APInt::getZero(rhs.getBitWidth()));
3063 break;
3064 case 2:
3065 // The binary case is the most common.
3066 newLHS = xorOp.getOperand(0);
3067 break;
3068 default:
3069 // The general case forces us to form a new xor with the remaining operands.
3070 SmallVector<Value> newOperands(xorOp.getOperands());
3071 newOperands.pop_back();
3072 newLHS = XorOp::create(rewriter, xorOp.getLoc(), newOperands, false);
3073 break;
3074 }
3075
3076 bool xorMultipleUses = !xorOp->hasOneUse();
3077
3078 // If the xor has multiple uses (not just the compare), then we need to
3079 // replace them as well.
3080 if (xorMultipleUses)
3081 replaceOpWithNewOpAndCopyNamehint<XorOp>(rewriter, xorOp, newLHS, xorRHS,
3082 false);
3083
3084 // Replace the comparison.
3085 rewriter.restoreInsertionPoint(ip);
3086 replaceOpWithNewOpAndCopyNamehint<ICmpOp>(
3087 rewriter, cmpOp, cmpOp.getPredicate(), newLHS, newRHS, false);
3088}
3089
3090LogicalResult ICmpOp::canonicalize(ICmpOp op, PatternRewriter &rewriter) {
3091 if (isOpTriviallyRecursive(op))
3092 return failure();
3093 APInt lhs, rhs;
3094
3095 // icmp cst, x -> icmp x, cst (with the predicate flipped)
3096 if (matchPattern(op.getLhs(), m_ConstantInt(&lhs))) {
3097 assert(!matchPattern(op.getRhs(), m_ConstantInt(&rhs)) &&
3098 "Should be folded");
3099 replaceOpWithNewOpAndCopyNamehint<ICmpOp>(
3100 rewriter, op, ICmpOp::getFlippedPredicate(op.getPredicate()),
3101 op.getRhs(), op.getLhs(), op.getTwoState());
3102 return success();
3103 }
3104
3105 // Canonicalize with RHS constant
3106 if (matchPattern(op.getRhs(), m_ConstantInt(&rhs))) {
3107 auto getConstant = [&](APInt constant) -> Value {
3108 return hw::ConstantOp::create(rewriter, op.getLoc(), std::move(constant));
3109 };
3110
3111 auto replaceWith = [&](ICmpPredicate predicate, Value lhs,
3112 Value rhs) -> LogicalResult {
3113 replaceOpWithNewOpAndCopyNamehint<ICmpOp>(rewriter, op, predicate, lhs,
3114 rhs, op.getTwoState());
3115 return success();
3116 };
3117
3118 auto replaceWithConstantI1 = [&](bool constant) -> LogicalResult {
3119 replaceOpWithNewOpAndCopyNamehint<hw::ConstantOp>(rewriter, op,
3120 APInt(1, constant));
3121 return success();
3122 };
3123
3124 switch (op.getPredicate()) {
3125 case ICmpPredicate::slt:
3126 // x < max -> x != max
3127 if (rhs.isMaxSignedValue())
3128 return replaceWith(ICmpPredicate::ne, op.getLhs(), op.getRhs());
3129 // x < min -> false
3130 if (rhs.isMinSignedValue())
3131 return replaceWithConstantI1(0);
3132 // x < min+1 -> x == min
3133 if ((rhs - 1).isMinSignedValue())
3134 return replaceWith(ICmpPredicate::eq, op.getLhs(),
3135 getConstant(rhs - 1));
3136 break;
3137 case ICmpPredicate::sgt:
3138 // x > min -> x != min
3139 if (rhs.isMinSignedValue())
3140 return replaceWith(ICmpPredicate::ne, op.getLhs(), op.getRhs());
3141 // x > max -> false
3142 if (rhs.isMaxSignedValue())
3143 return replaceWithConstantI1(0);
3144 // x > max-1 -> x == max
3145 if ((rhs + 1).isMaxSignedValue())
3146 return replaceWith(ICmpPredicate::eq, op.getLhs(),
3147 getConstant(rhs + 1));
3148 break;
3149 case ICmpPredicate::ult:
3150 // x < max -> x != max
3151 if (rhs.isAllOnes())
3152 return replaceWith(ICmpPredicate::ne, op.getLhs(), op.getRhs());
3153 // x < min -> false
3154 if (rhs.isZero())
3155 return replaceWithConstantI1(0);
3156 // x < min+1 -> x == min
3157 if ((rhs - 1).isZero())
3158 return replaceWith(ICmpPredicate::eq, op.getLhs(),
3159 getConstant(rhs - 1));
3160
3161 // x < 0xE0 -> extract(x, 5..7) != 0b111
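      // (This fires whenever the constant is a run of leading ones followed by
      // trailing zeros, so only the high bits decide the comparison.)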
3162 if (rhs.countLeadingOnes() + rhs.countTrailingZeros() ==
3163 rhs.getBitWidth()) {
3164 auto numOnes = rhs.countLeadingOnes();
3165 auto smaller = ExtractOp::create(rewriter, op.getLoc(), op.getLhs(),
3166 rhs.getBitWidth() - numOnes, numOnes);
3167 return replaceWith(ICmpPredicate::ne, smaller,
3168 getConstant(APInt::getAllOnes(numOnes)));
3169 }
3170
3171 break;
3172 case ICmpPredicate::ugt:
3173 // x > min -> x != min
3174 if (rhs.isZero())
3175 return replaceWith(ICmpPredicate::ne, op.getLhs(), op.getRhs());
3176 // x > max -> false
3177 if (rhs.isAllOnes())
3178 return replaceWithConstantI1(0);
3179 // x > max-1 -> x == max
3180 if ((rhs + 1).isAllOnes())
3181 return replaceWith(ICmpPredicate::eq, op.getLhs(),
3182 getConstant(rhs + 1));
3183
3184 // x > 0x07 -> extract(x, 3..7) != 0b00000
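      // (This fires whenever the constant plus one is a power of two, i.e. the
      // constant is a run of trailing ones, so only the high bits matter.)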
3185 if ((rhs + 1).isPowerOf2()) {
3186 auto numOnes = rhs.countTrailingOnes();
3187 auto newWidth = rhs.getBitWidth() - numOnes;
3188 auto smaller = ExtractOp::create(rewriter, op.getLoc(), op.getLhs(),
3189 numOnes, newWidth);
3190 return replaceWith(ICmpPredicate::ne, smaller,
3191 getConstant(APInt::getZero(newWidth)));
3192 }
3193
3194 break;
3195 case ICmpPredicate::sle:
3196 // x <= max -> true
3197 if (rhs.isMaxSignedValue())
3198 return replaceWithConstantI1(1);
3199 // x <= c -> x < (c+1)
3200 return replaceWith(ICmpPredicate::slt, op.getLhs(), getConstant(rhs + 1));
3201 case ICmpPredicate::sge:
3202 // x >= min -> true
3203 if (rhs.isMinSignedValue())
3204 return replaceWithConstantI1(1);
3205 // x >= c -> x > (c-1)
3206 return replaceWith(ICmpPredicate::sgt, op.getLhs(), getConstant(rhs - 1));
3207 case ICmpPredicate::ule:
3208 // x <= max -> true
3209 if (rhs.isAllOnes())
3210 return replaceWithConstantI1(1);
3211 // x <= c -> x < (c+1)
3212 return replaceWith(ICmpPredicate::ult, op.getLhs(), getConstant(rhs + 1));
3213 case ICmpPredicate::uge:
3214 // x >= min -> true
3215 if (rhs.isZero())
3216 return replaceWithConstantI1(1);
3217 // x >= c -> x > (c-1)
3218 return replaceWith(ICmpPredicate::ugt, op.getLhs(), getConstant(rhs - 1));
3219 case ICmpPredicate::eq:
3220 if (rhs.getBitWidth() == 1) {
3221 if (rhs.isZero()) {
3222 // x == 0 -> x ^ 1
3223 replaceOpWithNewOpAndCopyNamehint<XorOp>(rewriter, op, op.getLhs(),
3224 getConstant(APInt(1, 1)),
3225 op.getTwoState());
3226 return success();
3227 }
3228 if (rhs.isAllOnes()) {
3229 // x == 1 -> x
3230 replaceOpAndCopyNamehint(rewriter, op, op.getLhs());
3231 return success();
3232 }
3233 }
3234 break;
3235 case ICmpPredicate::ne:
3236 if (rhs.getBitWidth() == 1) {
3237 if (rhs.isZero()) {
3238 // x != 0 -> x
3239 replaceOpAndCopyNamehint(rewriter, op, op.getLhs());
3240 return success();
3241 }
3242 if (rhs.isAllOnes()) {
3243 // x != 1 -> x ^ 1
3244 replaceOpWithNewOpAndCopyNamehint<XorOp>(rewriter, op, op.getLhs(),
3245 getConstant(APInt(1, 1)),
3246 op.getTwoState());
3247 return success();
3248 }
3249 }
3250 break;
3251 case ICmpPredicate::ceq:
3252 case ICmpPredicate::cne:
3253 case ICmpPredicate::weq:
3254 case ICmpPredicate::wne:
3255 break;
3256 }
3257
3258 // We have some specific optimizations for comparison with a constant that
3259 // are only supported for equality comparisons.
3260 if (op.getPredicate() == ICmpPredicate::eq ||
3261 op.getPredicate() == ICmpPredicate::ne) {
3262 // Simplify `icmp(value_with_known_bits, rhscst)` into some extracts
3263 // with a smaller constant. We only support equality comparisons for
3264 // this.
3265 auto knownBits = computeKnownBits(op.getLhs());
3266 if (!knownBits.isUnknown())
3267 return combineEqualityICmpWithKnownBitsAndConstant(op, knownBits, rhs,
3268 rewriter),
3269 success();
3270
3271 // Simplify icmp eq(xor(a,b,cst1), cst2) -> icmp eq(xor(a,b),
3272 // cst1^cst2).
3273 if (auto xorOp = op.getLhs().getDefiningOp<XorOp>())
3274 if (xorOp.getOperands().back().getDefiningOp<hw::ConstantOp>())
3275 return combineEqualityICmpWithXorOfConstant(op, xorOp, rhs, rewriter),
3276 success();
3277
3278 // Simplify icmp eq(replicate(v, n), c) -> icmp eq(v, c') when c is all zeros
3279 // or all ones, where c' is the same pattern truncated to the width of v.
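    // For example, icmp eq(replicate(v : i2, 3), 0b111111) becomes
    // icmp eq(v, 0b11).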
3280 if (auto replicateOp = op.getLhs().getDefiningOp<ReplicateOp>())
3281 if (rhs.isAllOnes() || rhs.isZero()) {
3282 auto width = replicateOp.getInput().getType().getIntOrFloatBitWidth();
3283 auto cst =
3284 hw::ConstantOp::create(rewriter, op.getLoc(),
3285 rhs.isAllOnes() ? APInt::getAllOnes(width)
3286 : APInt::getZero(width));
3287 replaceOpWithNewOpAndCopyNamehint<ICmpOp>(
3288 rewriter, op, op.getPredicate(), replicateOp.getInput(), cst,
3289 op.getTwoState());
3290 return success();
3291 }
3292 }
3293 }
3294
3295 // icmp(cat(prefix, a, b, suffix), cat(prefix, c, d, suffix)) => icmp(cat(a,
3296 // b), cat(c, d)). Contains special handling for the sign bit in signed
3297 // comparisons.
3298 if (Operation *opLHS = op.getLhs().getDefiningOp())
3299 if (Operation *opRHS = op.getRhs().getDefiningOp())
3300 if (isa<ConcatOp, ReplicateOp>(opLHS) &&
3301 isa<ConcatOp, ReplicateOp>(opRHS)) {
3302 if (succeeded(matchAndRewriteCompareConcat(op, opLHS, opRHS, rewriter)))
3303 return success();
3304 }
3305
3306 return failure();
3307}
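// As an end-to-end illustration of the `ult` rewrite above (value names are
// hypothetical and the assembly syntax is abbreviated), a comparison such as
//
//   %c224 = hw.constant 224 : i8        // 0b1110_0000
//   %cmp  = comb.icmp ult %x, %c224 : i8
//
// canonicalizes to a comparison of just the three high bits:
//
//   %hi   = comb.extract %x from 5 : (i8) -> i3
//   %c7   = hw.constant 7 : i3          // 0b111
//   %cmp  = comb.icmp ne %hi, %c7 : i3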