#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "comb-to-synth"

namespace circt {
#define GEN_PASS_DEF_CONVERTCOMBTOSYNTH
#include "circt/Conversion/Passes.h.inc"
} // namespace circt

using namespace circt;
using namespace comb;
// Extract individual bits from a value, LSB first.
static SmallVector<Value> extractBits(OpBuilder &builder, Value val) {
  SmallVector<Value> bits;
  comb::extractBits(builder, val, bits);
  return bits;
}
// Construct a mux tree for a shift operation. `isLeftShift` determines on
// which side the padding is concatenated. The `getPadding` and `getExtract`
// callbacks produce the padding and the extracted bits for each shift amount
// in [0, maxShiftAmount]; `getPadding(maxShiftAmount)` must be valid and is
// used as the out-of-bounds value.
template <bool isLeftShift>
static Value createShiftLogic(ConversionPatternRewriter &rewriter,
                              Location loc, Value shiftAmount,
                              int64_t maxShiftAmount,
                              llvm::function_ref<Value(int64_t)> getPadding,
                              llvm::function_ref<Value(int64_t)> getExtract) {
  // The bits of the shift amount are the selectors of the mux tree.
  auto bits = extractBits(rewriter, shiftAmount);

  // Create a candidate result for each in-bounds shift amount.
  SmallVector<Value> nodes;
  nodes.reserve(maxShiftAmount);
  for (int64_t i = 0; i < maxShiftAmount; ++i) {
    Value extract = getExtract(i);
    Value padding = getPadding(i);

    if (!padding) {
      nodes.push_back(extract);
      continue;
    }

    // Concatenate the extracted bits with the padding, on the side
    // determined by the shift direction.
    if (isLeftShift)
      nodes.push_back(
          rewriter.createOrFold<comb::ConcatOp>(loc, extract, padding));
    else
      nodes.push_back(
          rewriter.createOrFold<comb::ConcatOp>(loc, padding, extract));
  }

  // Amounts >= maxShiftAmount yield the padding-only value.
  auto outOfBoundsValue = getPadding(maxShiftAmount);
  assert(outOfBoundsValue && "outOfBoundsValue must be valid");

  // Select among the candidates with a mux tree over the shift amount bits.
  auto result =
      comb::constructMuxTree(rewriter, loc, bits, nodes, outOfBoundsValue);

  // Add a bounds check for the shift amount.
  auto inBound = rewriter.createOrFold<comb::ICmpOp>(
      loc, ICmpPredicate::ult, shiftAmount,
      hw::ConstantOp::create(
          rewriter, loc,
          APInt(shiftAmount.getType().getIntOrFloatBitWidth(),
                maxShiftAmount)));

  return rewriter.createOrFold<comb::MuxOp>(loc, inBound, result,
                                            outOfBoundsValue);
}
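// A sketch of the result for a hypothetical `comb.shl %x, %s : i4`: node i is
// concat(%x[3-i:0], i'0), the mux tree indexed by the bits of %s selects
// node %s, and the bounds check forces the all-zero value for %s >= 4.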
// Build a 3-input majority function (the carry function of a full adder),
// either as a single MIG majority op or from AND/OR/XOR gates.
static Value createMajorityFunction(OpBuilder &rewriter, Location loc, Value a,
                                    Value b, Value carry,
                                    bool useMajorityInverterOp) {
  if (useMajorityInverterOp) {
    std::array<Value, 3> inputs = {a, b, carry};
    std::array<bool, 3> inverts = {false, false, false};
    return synth::mig::MajorityInverterOp::create(rewriter, loc, inputs,
                                                  inverts);
  }

  // maj(a, b, carry) = (carry & (a ^ b)) | (a & b)
  auto aXorB = comb::XorOp::create(rewriter, loc, ValueRange{a, b}, true);
  auto andOp =
      comb::AndOp::create(rewriter, loc, ValueRange{carry, aXorB}, true);
  auto aAndB = comb::AndOp::create(rewriter, loc, ValueRange{a, b}, true);
  return comb::OrOp::create(rewriter, loc, ValueRange{andOp, aAndB}, true);
}
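// maj(a, b, cin) is 1 exactly when at least two inputs are 1, which is the
// carry-out of a full adder; the ripple-carry adder lowering below relies on
// this.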
// Extract the most significant bit of a value.
static Value extractMSB(OpBuilder &builder, Value val) {
  return builder.createOrFold<comb::ExtractOp>(
      val.getLoc(), val, val.getType().getIntOrFloatBitWidth() - 1, 1);
}

// Extract all bits except the most significant bit.
static Value extractOtherThanMSB(OpBuilder &builder, Value val) {
  return builder.createOrFold<comb::ExtractOp>(
      val.getLoc(), val, 0, val.getType().getIntOrFloatBitWidth() - 1);
}
using ConstantOrValue = llvm::PointerUnion<Value, mlir::IntegerAttr>;
// Return the number of unknown bits in `value` and populate `values` with the
// constants and opaque values that make it up, LSB first. Returns a negative
// value when the bit width cannot be determined.
static int64_t getNumUnknownBitsAndPopulateValues(
    Value value, llvm::SmallVectorImpl<ConstantOrValue> &values) {
  // Zero-width values contribute nothing.
  if (value.getType().isInteger(0))
    return 0;

  // Recurse into concatenations, visiting operands LSB first.
  if (auto concat = value.getDefiningOp<comb::ConcatOp>()) {
    int64_t totalUnknownBits = 0;
    for (auto concatInput : llvm::reverse(concat.getInputs())) {
      auto unknownBits =
          getNumUnknownBitsAndPopulateValues(concatInput, values);
      if (unknownBits < 0)
        return unknownBits;
      totalUnknownBits += unknownBits;
    }
    return totalUnknownBits;
  }

  // Constants are fully known.
  if (auto constant = value.getDefiningOp<hw::ConstantOp>()) {
    values.push_back(constant.getValueAttr());
    return 0;
  }

  // Treat any other value as entirely unknown bits.
  values.push_back(value);
  return hw::getBitWidth(value.getType());
}
// Substitute the unknown bits recorded in `constantOrValues` with the bits of
// `mask`, producing a concrete width-bit value.
static APInt substitueMaskToValues(
    size_t width, llvm::SmallVectorImpl<ConstantOrValue> &constantOrValues,
    uint32_t mask) {
  uint32_t bitPos = 0, unknownPos = 0;
  APInt result(width, 0);
  for (auto constantOrValue : constantOrValues) {
    int64_t elemWidth;
    if (auto constant = dyn_cast<IntegerAttr>(constantOrValue)) {
      // Known bits are copied through unchanged.
      elemWidth = constant.getValue().getBitWidth();
      result.insertBits(constant.getValue(), bitPos);
    } else {
      // Unknown bits take the next `elemWidth` bits of the mask.
      elemWidth = hw::getBitWidth(cast<Value>(constantOrValue).getType());
      assert(elemWidth >= 0 && "unknown bit width");
      assert(elemWidth + unknownPos < 32 && "unknown bit width too large");
      uint32_t usedBits = (mask >> unknownPos) & ((1 << elemWidth) - 1);
      result.insertBits(APInt(elemWidth, usedBits), bitPos);
      unknownPos += elemWidth;
    }
    bitPos += elemWidth;
  }
  return result;
}
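// For example, with width 4, constantOrValues = [IntegerAttr 0b01, an opaque
// i2 value], and mask = 0b10: the constant fills bits [1:0] and the mask bits
// fill the unknown bits [3:2], giving 0b1001.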
// Emulate a binary operation whose operands contain unknown bits by
// enumerating every assignment of the unknown bits, folding the operation on
// constants, and selecting the result with a mux tree.
static LogicalResult emulateBinaryOpForUnknownBits(
    ConversionPatternRewriter &rewriter, int64_t maxEmulationUnknownBits,
    Operation *op,
    llvm::function_ref<APInt(const APInt &, const APInt &)> emulate) {
  SmallVector<ConstantOrValue> lhsValues, rhsValues;

  assert(op->getNumResults() == 1 && op->getNumOperands() == 2 &&
         "op must be a single result binary operation");

  auto lhs = op->getOperand(0);
  auto rhs = op->getOperand(1);
  auto width = op->getResult(0).getType().getIntOrFloatBitWidth();
  auto loc = op->getLoc();
  auto numLhsUnknownBits = getNumUnknownBitsAndPopulateValues(lhs, lhsValues);
  auto numRhsUnknownBits = getNumUnknownBitsAndPopulateValues(rhs, rhsValues);

  // Abort if the number of unknown bits cannot be determined.
  if (numLhsUnknownBits < 0 || numRhsUnknownBits < 0)
    return failure();

  int64_t totalUnknownBits = numLhsUnknownBits + numRhsUnknownBits;
  if (totalUnknownBits > maxEmulationUnknownBits)
    return failure();

  SmallVector<Value> emulatedResults;
  emulatedResults.reserve(1 << totalUnknownBits);

  // Cache constants to avoid materializing duplicates.
  DenseMap<IntegerAttr, hw::ConstantOp> constantPool;
  auto getConstant = [&](const APInt &value) -> Value {
    auto attr = rewriter.getIntegerAttr(rewriter.getIntegerType(width), value);
    auto it = constantPool.find(attr);
    if (it != constantPool.end())
      return it->second;
    auto constant = hw::ConstantOp::create(rewriter, loc, value);
    constantPool[attr] = constant;
    return constant;
  };

  // Emulate the operation for every assignment of the unknown bits.
  for (uint32_t lhsMask = 0, lhsMaskEnd = 1 << numLhsUnknownBits;
       lhsMask < lhsMaskEnd; ++lhsMask) {
    APInt lhsValue = substitueMaskToValues(width, lhsValues, lhsMask);
    for (uint32_t rhsMask = 0, rhsMaskEnd = 1 << numRhsUnknownBits;
         rhsMask < rhsMaskEnd; ++rhsMask) {
      APInt rhsValue = substitueMaskToValues(width, rhsValues, rhsMask);
      emulatedResults.push_back(getConstant(emulate(lhsValue, rhsValue)));
    }
  }

  // The unknown bits themselves become the selectors of the mux tree.
  SmallVector<Value> selectors;
  selectors.reserve(totalUnknownBits);
  for (auto &concatedValues : {rhsValues, lhsValues})
    for (auto valueOrConstant : concatedValues) {
      auto value = dyn_cast<Value>(valueOrConstant);
      if (!value)
        continue;
      llvm::append_range(selectors, extractBits(rewriter, value));
    }

  assert(totalUnknownBits == static_cast<int64_t>(selectors.size()) &&
         "number of selectors must match");
  auto muxed = constructMuxTree(rewriter, loc, selectors, emulatedResults,
                                getConstant(APInt::getZero(width)));
  replaceOpAndCopyNamehint(rewriter, op, muxed);
  return success();
}
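// With k unknown bits in total this materializes 2^k constant results and a
// k-level mux tree selected by those bits, which is why the pass bounds k
// with maxEmulationUnknownBits.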
// Lower comb::AndOp to synth::aig::AndInverterOp with no inversions.
struct CombAndOpConversion : OpConversionPattern<AndOp> {
  using OpConversionPattern<AndOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(AndOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    SmallVector<bool> nonInverts(adaptor.getInputs().size(), false);
    replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
        rewriter, op, adaptor.getInputs(), nonInverts);
    return success();
  }
};
// Lower comb::OrOp to AIG ops using De Morgan: a | b = ~(~a & ~b).
struct CombOrToAIGConversion : OpConversionPattern<OrOp> {
  using OpConversionPattern<OrOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(OrOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    SmallVector<bool> allInverts(adaptor.getInputs().size(), true);
    auto andOp = synth::aig::AndInverterOp::create(
        rewriter, op.getLoc(), adaptor.getInputs(), allInverts);
    replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
        rewriter, op, andOp, /*invert=*/true);
    return success();
  }
};
// Lower comb::OrOp to a MIG majority: or(a, b) = maj(a, b, 1).
struct CombOrToMIGConversion : OpConversionPattern<OrOp> {
  using OpConversionPattern<OrOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(OrOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    if (op.getNumOperands() != 2)
      return failure();
    SmallVector<Value, 3> inputs(adaptor.getInputs());
    auto allOnes = hw::ConstantOp::create(
        rewriter, op.getLoc(),
        APInt::getAllOnes(hw::getBitWidth(op.getType())));
    inputs.push_back(allOnes);
    std::array<bool, 3> inverts = {false, false, false};
    replaceOpWithNewOpAndCopyNamehint<synth::mig::MajorityInverterOp>(
        rewriter, op, inputs, inverts);
    return success();
  }
};
// Lower synth::aig::AndInverterOp to a MIG majority: and(a, b) = maj(a, b, 0).
struct AndInverterToMIGConversion
    : OpConversionPattern<synth::aig::AndInverterOp> {
  using OpConversionPattern<synth::aig::AndInverterOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(synth::aig::AndInverterOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    if (op.getNumOperands() > 2)
      return failure();
    if (op.getNumOperands() == 1) {
      // A unary and-inverter is just a (possibly inverted) buffer.
      SmallVector<bool, 1> inverts{op.getInverted()[0]};
      replaceOpWithNewOpAndCopyNamehint<synth::mig::MajorityInverterOp>(
          rewriter, op, adaptor.getInputs(), inverts);
      return success();
    }
    SmallVector<Value, 3> inputs(adaptor.getInputs());
    auto zero = hw::ConstantOp::create(
        rewriter, op.getLoc(), APInt::getZero(hw::getBitWidth(op.getType())));
    inputs.push_back(zero);
    SmallVector<bool, 3> inverts(adaptor.getInverted());
    inverts.push_back(false);
    replaceOpWithNewOpAndCopyNamehint<synth::mig::MajorityInverterOp>(
        rewriter, op, inputs, inverts);
    return success();
  }
};
// Lower comb::XorOp to AIG ops: a ^ b = ~(~a & ~b) & ~(a & b).
struct CombXorOpConversion : OpConversionPattern<XorOp> {
  using OpConversionPattern<XorOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(XorOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    if (op.getNumOperands() != 2)
      return failure();
    auto inputs = adaptor.getInputs();
    SmallVector<bool> allInverts(inputs.size(), true);
    SmallVector<bool> allNotInverts(inputs.size(), false);

    auto notAAndNotB = synth::aig::AndInverterOp::create(rewriter, op.getLoc(),
                                                         inputs, allInverts);
    auto aAndB = synth::aig::AndInverterOp::create(rewriter, op.getLoc(),
                                                   inputs, allNotInverts);

    replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
        rewriter, op, notAAndNotB, aAndB,
        /*invertLhs=*/true, /*invertRhs=*/true);
    return success();
  }
};
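// This is the standard three-node XOR encoding in an AIG: two AND gates over
// the same fanins plus one inverted-input AND combining them.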
// Lower a variadic fully-associative operation into a balanced binary tree.
template <typename OpTy>
struct CombLowerVariadicOp : OpConversionPattern<OpTy> {
  using OpConversionPattern<OpTy>::OpConversionPattern;
  using OpAdaptor = typename OpConversionPattern<OpTy>::OpAdaptor;

  LogicalResult
  matchAndRewrite(OpTy op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto result = lowerFullyAssociativeOp(op, adaptor.getOperands(), rewriter);
    replaceOpAndCopyNamehint(rewriter, op, result);
    return success();
  }

  static Value lowerFullyAssociativeOp(OpTy op, OperandRange operands,
                                       ConversionPatternRewriter &rewriter) {
    Value lhs, rhs;
    switch (operands.size()) {
    case 0:
      llvm_unreachable("cannot be called with empty operand range");
      break;
    case 1:
      return operands[0];
    case 2:
      lhs = operands[0];
      rhs = operands[1];
      return OpTy::create(rewriter, op.getLoc(), ValueRange{lhs, rhs}, true);
    default:
      auto firstHalf = operands.size() / 2;
      lhs = lowerFullyAssociativeOp(op, operands.take_front(firstHalf),
                                    rewriter);
      rhs = lowerFullyAssociativeOp(op, operands.drop_front(firstHalf),
                                    rewriter);
      return OpTy::create(rewriter, op.getLoc(), ValueRange{lhs, rhs}, true);
    }
  }
};
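// E.g. xor(a, b, c, d) becomes xor(xor(a, b), xor(c, d)); the balanced tree
// has logarithmic depth instead of the linear depth of a left-to-right fold.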
// Lower comb::MuxOp to AIG ops: mux(c, t, f) = (c & t) | (~c & f).
struct CombMuxOpConversion : OpConversionPattern<MuxOp> {
  using OpConversionPattern<MuxOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(MuxOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Value cond = op.getCond();
    auto trueVal = op.getTrueValue();
    auto falseVal = op.getFalseValue();

    if (!op.getType().isInteger()) {
      // Bitcast aggregates to integers first; the result is cast back below.
      auto widthType = rewriter.getIntegerType(hw::getBitWidth(op.getType()));
      trueVal =
          hw::BitcastOp::create(rewriter, op.getLoc(), widthType, trueVal);
      falseVal =
          hw::BitcastOp::create(rewriter, op.getLoc(), widthType, falseVal);
    }

    // Replicate the condition to match the operand width.
    if (!trueVal.getType().isInteger(1))
      cond = comb::ReplicateOp::create(rewriter, op.getLoc(), trueVal.getType(),
                                       cond);

    // mux(cond, t, f) = (cond & t) | (~cond & f)
    auto lhs =
        synth::aig::AndInverterOp::create(rewriter, op.getLoc(), cond, trueVal);
    auto rhs = synth::aig::AndInverterOp::create(rewriter, op.getLoc(), cond,
                                                 falseVal, true, false);

    Value result = comb::OrOp::create(rewriter, op.getLoc(), lhs, rhs);
    // Cast back to the original type if needed.
    if (result.getType() != op.getType())
      result =
          hw::BitcastOp::create(rewriter, op.getLoc(), op.getType(), result);
    replaceOpAndCopyNamehint(rewriter, op, result);
    return success();
  }
};
// Adder architectures used when lowering comb.add and comparisons.
enum AdderArchitecture { RippleCarry, Sklanskey, KoggeStone, BrentKung };

// Determine the adder architecture for the given operation and width.
AdderArchitecture determineAdderArch(Operation *op, int64_t width) {
  // A test-only attribute may force a specific architecture.
  if (auto strAttr = op->getAttrOfType<StringAttr>("synth.test.arch"))
    return llvm::StringSwitch<AdderArchitecture>(strAttr.getValue())
        .Case("SKLANSKEY", Sklanskey)
        .Case("KOGGE-STONE", KoggeStone)
        .Case("BRENT-KUNG", BrentKung)
        .Case("RIPPLE-CARRY", RippleCarry);

  // Otherwise choose by width: ripple-carry for narrow adders, Sklanskey for
  // medium widths, Kogge-Stone for wide ones. The threshold values below are
  // placeholders; the original values were lost from this listing.
  constexpr int64_t rippleCarryMaxWidth = 4;
  constexpr int64_t sklanskeyMaxWidth = 64;
  if (width <= rippleCarryMaxWidth)
    return AdderArchitecture::RippleCarry;
  if (width <= sklanskeyMaxWidth)
    return AdderArchitecture::Sklanskey;
  return AdderArchitecture::KoggeStone;
}
// Lower a Kogge-Stone prefix tree in place over the propagate (pPrefix) and
// generate (gPrefix) vectors. At each stage with stride s:
//   G[i] = G[i] | (P[i] & G[i-s]),  P[i] = P[i] & P[i-s]
void lowerKoggeStonePrefixTree(OpBuilder &builder, Location loc,
                               SmallVector<Value> &pPrefix,
                               SmallVector<Value> &gPrefix) {
  auto width = static_cast<int64_t>(pPrefix.size());
  assert(width == static_cast<int64_t>(gPrefix.size()));
  SmallVector<Value> pPrefixNew = pPrefix;
  SmallVector<Value> gPrefixNew = gPrefix;

  for (int64_t stride = 1; stride < width; stride *= 2) {
    for (int64_t i = stride; i < width; ++i) {
      int64_t j = i - stride;
      // G_i = G_i | (P_i & G_j)
      Value andPG = comb::AndOp::create(builder, loc, pPrefix[i], gPrefix[j]);
      gPrefixNew[i] = comb::OrOp::create(builder, loc, gPrefix[i], andPG);
      // P_i = P_i & P_j
      pPrefixNew[i] = comb::AndOp::create(builder, loc, pPrefix[i], pPrefix[j]);
    }
    pPrefix = pPrefixNew;
    gPrefix = gPrefixNew;
  }

  LLVM_DEBUG({
    int64_t stage = 0;
    for (int64_t stride = 1; stride < width; stride *= 2, ++stage) {
      llvm::dbgs()
          << "--------------------------------------- Kogge-Stone Stage "
          << stage << "\n";
      for (int64_t i = stride; i < width; ++i) {
        int64_t j = i - stride;
        llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
                     << " OR (P" << i << stage << " AND G" << j << stage
                     << ")\n";
        llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
                     << " AND P" << j << stage << "\n";
      }
    }
  });
}
// Lower a Sklanskey (divide-and-conquer) prefix tree in place. At each stage
// with stride s, every block of s positions combines with the last node of
// the preceding block.
void lowerSklanskeyPrefixTree(OpBuilder &builder, Location loc,
                              SmallVector<Value> &pPrefix,
                              SmallVector<Value> &gPrefix) {
  auto width = static_cast<int64_t>(pPrefix.size());
  assert(width == static_cast<int64_t>(gPrefix.size()));
  SmallVector<Value> pPrefixNew = pPrefix;
  SmallVector<Value> gPrefixNew = gPrefix;
  for (int64_t stride = 1; stride < width; stride *= 2) {
    for (int64_t i = stride; i < width; i += 2 * stride) {
      // The last node of the previous block.
      int64_t j = i - 1;
      for (int64_t k = 0; k < stride && i + k < width; ++k) {
        int64_t idx = i + k;
        // G_idx = G_idx | (P_idx & G_j)
        Value andPG =
            comb::AndOp::create(builder, loc, pPrefix[idx], gPrefix[j]);
        gPrefixNew[idx] = comb::OrOp::create(builder, loc, gPrefix[idx], andPG);
        // P_idx = P_idx & P_j
        pPrefixNew[idx] =
            comb::AndOp::create(builder, loc, pPrefix[idx], pPrefix[j]);
      }
    }
    pPrefix = pPrefixNew;
    gPrefix = gPrefixNew;
  }

  LLVM_DEBUG({
    int64_t stage = 0;
    for (int64_t stride = 1; stride < width; stride *= 2, ++stage) {
      llvm::dbgs() << "--------------------------------------- Sklanskey Stage "
                   << stage << "\n";
      for (int64_t i = stride; i < width; i += 2 * stride) {
        int64_t j = i - 1;
        for (int64_t k = 0; k < stride && i + k < width; ++k) {
          int64_t idx = i + k;
          llvm::dbgs() << "G" << idx << stage + 1 << " = G" << idx << stage
                       << " OR (P" << idx << stage << " AND G" << j << stage
                       << ")\n";
          llvm::dbgs() << "P" << idx << stage + 1 << " = P" << idx << stage
                       << " AND P" << j << stage << "\n";
        }
      }
    }
  });
}
// Lower a Brent-Kung prefix tree in place: a forward combining phase over
// power-of-two strides followed by a backward distribution phase.
void lowerBrentKungPrefixTree(OpBuilder &builder, Location loc,
                              SmallVector<Value> &pPrefix,
                              SmallVector<Value> &gPrefix) {
  auto width = static_cast<int64_t>(pPrefix.size());
  assert(width == static_cast<int64_t>(gPrefix.size()));
  SmallVector<Value> pPrefixNew = pPrefix;
  SmallVector<Value> gPrefixNew = gPrefix;

  // Forward phase: combine at positions 2s-1, 4s-1, ...
  int64_t stride;
  for (stride = 1; stride < width; stride *= 2) {
    for (int64_t i = stride * 2 - 1; i < width; i += stride * 2) {
      int64_t j = i - stride;
      // G_i = G_i | (P_i & G_j)
      Value andPG = comb::AndOp::create(builder, loc, pPrefix[i], gPrefix[j]);
      gPrefixNew[i] = comb::OrOp::create(builder, loc, gPrefix[i], andPG);
      // P_i = P_i & P_j
      pPrefixNew[i] = comb::AndOp::create(builder, loc, pPrefix[i], pPrefix[j]);
    }
    pPrefix = pPrefixNew;
    gPrefix = gPrefixNew;
  }

  // Backward phase: fill in the intermediate positions.
  for (; stride > 0; stride /= 2) {
    for (int64_t i = stride * 3 - 1; i < width; i += stride * 2) {
      int64_t j = i - stride;
      // G_i = G_i | (P_i & G_j)
      Value andPG = comb::AndOp::create(builder, loc, pPrefix[i], gPrefix[j]);
      gPrefixNew[i] = comb::OrOp::create(builder, loc, gPrefix[i], andPG);
      // P_i = P_i & P_j
      pPrefixNew[i] = comb::AndOp::create(builder, loc, pPrefix[i], pPrefix[j]);
    }
    pPrefix = pPrefixNew;
    gPrefix = gPrefixNew;
  }

  LLVM_DEBUG({
    int64_t stage = 0;
    int64_t dbgStride;
    for (dbgStride = 1; dbgStride < width; dbgStride *= 2, ++stage) {
      llvm::dbgs() << "--------------------------------------- Brent-Kung FW "
                   << stage << " : Stride " << dbgStride << "\n";
      for (int64_t i = dbgStride * 2 - 1; i < width; i += dbgStride * 2) {
        int64_t j = i - dbgStride;
        llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
                     << " OR (P" << i << stage << " AND G" << j << stage
                     << ")\n";
        llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
                     << " AND P" << j << stage << "\n";
      }
    }
    for (; dbgStride > 0; dbgStride /= 2, ++stage) {
      if (dbgStride * 3 - 1 < width)
        llvm::dbgs() << "--------------------------------------- Brent-Kung BW "
                     << stage << " : Stride " << dbgStride << "\n";
      for (int64_t i = dbgStride * 3 - 1; i < width; i += dbgStride * 2) {
        int64_t j = i - dbgStride;
        llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
                     << " OR (P" << i << stage << " AND G" << j << stage
                     << ")\n";
        llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
                     << " AND P" << j << stage << "\n";
      }
    }
  });
}
// Materialize a Kogge-Stone prefix tree lazily. Nodes are created on demand
// and cached, so queries that need only a few final positions (e.g.
// comparisons) build just the logic in their fan-in cone.
class LazyKoggeStonePrefixTree {
public:
  LazyKoggeStonePrefixTree(OpBuilder &builder, Location loc, int64_t width,
                           ArrayRef<Value> pPrefix, ArrayRef<Value> gPrefix)
      : builder(builder), loc(loc), width(width) {
    assert(width > 0 && "width must be positive");
    for (int64_t i = 0; i < width; ++i)
      prefixCache[{0, i}] = {pPrefix[i], gPrefix[i]};
  }

  // Return the final (propagate, generate) pair for position i.
  std::pair<Value, Value> getFinal(int64_t i) {
    assert(i >= 0 && i < width && "i out of bounds");
    return getGroupAndPropagate(llvm::Log2_64_Ceil(width), i);
  }

private:
  // Return the (propagate, generate) pair for position i at the given level.
  std::pair<Value, Value> getGroupAndPropagate(int64_t level, int64_t i);

  OpBuilder &builder;
  Location loc;
  int64_t width;
  // Cache from (level, position) to the (propagate, generate) pair.
  DenseMap<std::pair<int64_t, int64_t>, std::pair<Value, Value>> prefixCache;
};

std::pair<Value, Value>
LazyKoggeStonePrefixTree::getGroupAndPropagate(int64_t level, int64_t i) {
  assert(i < width && "i out of bounds");
  auto key = std::make_pair(level, i);
  auto it = prefixCache.find(key);
  if (it != prefixCache.end())
    return it->second;

  assert(level > 0 && "If the level is 0, we should have hit the cache");
  int64_t previousStride = 1LL << (level - 1);
  if (i < previousStride) {
    // No partner at this level; pass the value through.
    auto [propagateI, generateI] = getGroupAndPropagate(level - 1, i);
    prefixCache[key] = {propagateI, generateI};
    return prefixCache[key];
  }

  int64_t j = i - previousStride;
  auto [propagateI, generateI] = getGroupAndPropagate(level - 1, i);
  auto [propagateJ, generateJ] = getGroupAndPropagate(level - 1, j);

  // G = G_i | (P_i & G_j), P = P_i & P_j.
  Value andPG = comb::AndOp::create(builder, loc, propagateI, generateJ);
  Value newGenerate = comb::OrOp::create(builder, loc, generateI, andPG);
  Value newPropagate =
      comb::AndOp::create(builder, loc, propagateI, propagateJ);
  prefixCache[key] = {newPropagate, newGenerate};
  return prefixCache[key];
}
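// Requesting only getFinal(width - 1) touches O(width) cache entries in
// total, rather than the O(width * log2(width)) nodes of the full tree.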
// Lower comb::AddOp to either a ripple-carry or a parallel-prefix adder,
// targeting MIG or AIG primitives depending on `lowerToMIG`.
template <bool lowerToMIG>
struct CombAddOpConversion : OpConversionPattern<AddOp> {
  using OpConversionPattern<AddOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(AddOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto inputs = adaptor.getInputs();
    // Variadic additions are lowered to binary ops beforehand.
    if (inputs.size() != 2)
      return failure();

    auto width = op.getType().getIntOrFloatBitWidth();
    // Skip zero-width values.
    if (width == 0) {
      replaceOpWithNewOpAndCopyNamehint<hw::ConstantOp>(rewriter, op,
                                                        op.getType(), 0);
      return success();
    }

    auto arch = determineAdderArch(op, width);
    if (arch == AdderArchitecture::RippleCarry)
      return lowerRippleCarryAdder(op, inputs, rewriter);
    return lowerParallelPrefixAdder(op, inputs, rewriter);
  }
  LogicalResult
  lowerRippleCarryAdder(comb::AddOp op, ValueRange inputs,
                        ConversionPatternRewriter &rewriter) const {
    auto width = op.getType().getIntOrFloatBitWidth();
    // Lower to a chain of full adders, LSB first.
    Value carry;
    auto aBits = extractBits(rewriter, inputs[0]);
    auto bBits = extractBits(rewriter, inputs[1]);
    SmallVector<Value> results;
    results.resize(width);
    for (int64_t i = 0; i < width; ++i) {
      SmallVector<Value> xorOperands = {aBits[i], bBits[i]};
      if (carry)
        xorOperands.push_back(carry);

      // sum[i] = a[i] ^ b[i] ^ carry; results are stored MSB first for concat.
      results[width - i - 1] =
          comb::XorOp::create(rewriter, op.getLoc(), xorOperands, true);

      // The carry out of the last bit is dropped.
      if (i == width - 1)
        break;

      if (carry)
        carry = createMajorityFunction(rewriter, op.getLoc(), aBits[i],
                                       bBits[i], carry, lowerToMIG);
      else
        carry = comb::AndOp::create(rewriter, op.getLoc(),
                                    ValueRange{aBits[i], bBits[i]}, true);
    }

    LLVM_DEBUG(llvm::dbgs() << "Lower comb.add to Ripple-Carry Adder of width "
                            << width << "\n");
    replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(rewriter, op, results);
    return success();
  }
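  // Each stage is a textbook full adder: sum_i = a_i ^ b_i ^ c_i and
  // c_{i+1} = maj(a_i, b_i, c_i), so the carry chain depth grows linearly
  // with the width.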
  LogicalResult
  lowerParallelPrefixAdder(comb::AddOp op, ValueRange inputs,
                           ConversionPatternRewriter &rewriter) const {
    auto width = op.getType().getIntOrFloatBitWidth();
    auto aBits = extractBits(rewriter, inputs[0]);
    auto bBits = extractBits(rewriter, inputs[1]);

    // Construct the per-bit propagate (p) and generate (g) signals.
    SmallVector<Value> p, g;
    p.reserve(width);
    g.reserve(width);
    for (auto [aBit, bBit] : llvm::zip(aBits, bBits)) {
      // p_i = a_i ^ b_i, g_i = a_i & b_i
      p.push_back(comb::XorOp::create(rewriter, op.getLoc(), aBit, bBit));
      g.push_back(comb::AndOp::create(rewriter, op.getLoc(), aBit, bBit));
    }

    LLVM_DEBUG({
      llvm::dbgs() << "Lower comb.add to Parallel-Prefix of width " << width
                   << "\n--------------------------------------- Init\n";
      for (int64_t i = 0; i < width; ++i) {
        llvm::dbgs() << "P0" << i << " = A" << i << " XOR B" << i << "\n";
        llvm::dbgs() << "G0" << i << " = A" << i << " AND B" << i << "\n";
      }
    });

    // Compute the group generate/propagate signals with the chosen tree.
    SmallVector<Value> pPrefix = p;
    SmallVector<Value> gPrefix = g;
    auto arch = determineAdderArch(op, width);
    switch (arch) {
    case AdderArchitecture::RippleCarry:
      llvm_unreachable("Ripple-Carry should be handled separately");
      break;
    case AdderArchitecture::Sklanskey:
      lowerSklanskeyPrefixTree(rewriter, op.getLoc(), pPrefix, gPrefix);
      break;
    case AdderArchitecture::KoggeStone:
      lowerKoggeStonePrefixTree(rewriter, op.getLoc(), pPrefix, gPrefix);
      break;
    case AdderArchitecture::BrentKung:
      lowerBrentKungPrefixTree(rewriter, op.getLoc(), pPrefix, gPrefix);
      break;
    }

    // Sum bits, stored MSB first for concat: sum_0 = p_0 and
    // sum_i = p_i ^ c_i, where the carry c_i is the group generate
    // gPrefix[i - 1].
    SmallVector<Value> results;
    results.resize(width);
    results[width - 1] = p[0];
    for (int64_t i = 1; i < width; ++i)
      results[width - 1 - i] =
          comb::XorOp::create(rewriter, op.getLoc(), p[i], gPrefix[i - 1]);

    replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(rewriter, op, results);

    LLVM_DEBUG({
      llvm::dbgs() << "--------------------------------------- Completion\n"
                   << "RES0 = P0\n";
      for (int64_t i = 1; i < width; ++i)
        llvm::dbgs() << "RES" << i << " = P" << i << " XOR G" << i - 1 << "\n";
    });
    return success();
  }
};
// Lower comb::MulOp to a partial-product array followed by compression.
struct CombMulOpConversion : OpConversionPattern<MulOp> {
  using OpConversionPattern<MulOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(MulOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    if (adaptor.getInputs().size() != 2)
      return failure();

    Location loc = op.getLoc();
    Value a = adaptor.getInputs()[0];
    Value b = adaptor.getInputs()[1];
    unsigned width = op.getType().getIntOrFloatBitWidth();
    Value falseValue = hw::ConstantOp::create(rewriter, loc, APInt(1, 0));

    SmallVector<Value> aBits = extractBits(rewriter, a);
    SmallVector<Value> bBits = extractBits(rewriter, b);

    // Build the partial-product triangle: row i is a & replicate(b[i]),
    // shifted left by i and truncated to the result width.
    SmallVector<SmallVector<Value>> partialProducts;
    partialProducts.reserve(width);
    for (unsigned i = 0; i < width; ++i) {
      SmallVector<Value> row(i, falseValue);
      for (unsigned j = 0; i + j < width; ++j)
        row.push_back(
            rewriter.createOrFold<comb::AndOp>(loc, aBits[j], bBits[i]));
      partialProducts.push_back(row);
    }

    if (width == 1) {
      rewriter.replaceOp(op, partialProducts[0][0]);
      return success();
    }

    // Compress the partial products to two addends in carry-save form and
    // finish with a single carry-propagate adder. The construction of the
    // compressor `comp` over `partialProducts` is elided in this listing.
    auto addends = comp.compressToHeight(rewriter, 2);
    auto newAdd = comb::AddOp::create(rewriter, loc, addends, true);
    replaceOpAndCopyNamehint(rewriter, op, newAdd);
    return success();
  }
};
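// E.g. for width 3: row0 = a & replicate(b[0]), row1 = (a << 1) masked by
// b[1], row2 = (a << 2) masked by b[2]; compressing the three rows down to
// two leaves one carry-propagate addition on the critical path.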
// Common base for divide/modulo lowerings, which are emulated by enumerating
// the unknown bits; the pattern carries the emulation budget.
template <typename OpTy>
struct DivModOpConversionBase : OpConversionPattern<OpTy> {
  DivModOpConversionBase(MLIRContext *context, int64_t maxEmulationUnknownBits)
      : OpConversionPattern<OpTy>(context),
        maxEmulationUnknownBits(maxEmulationUnknownBits) {
    assert(maxEmulationUnknownBits < 32 &&
           "maxEmulationUnknownBits must be less than 32");
  }
  const int64_t maxEmulationUnknownBits;
};
// Lower comb::DivUOp. Division by a power of two is a shift; other divisors
// are emulated via unknown-bit enumeration.
struct CombDivUOpConversion : DivModOpConversionBase<DivUOp> {
  using DivModOpConversionBase<DivUOp>::DivModOpConversionBase;

  LogicalResult
  matchAndRewrite(DivUOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // udiv(x, 2^n) == x >> n == zext(x[width-1:n]).
    if (auto rhsConstantOp = adaptor.getRhs().getDefiningOp<hw::ConstantOp>())
      if (rhsConstantOp.getValue().isPowerOf2()) {
        size_t extractAmount = rhsConstantOp.getValue().ceilLogBase2();
        size_t width = op.getType().getIntOrFloatBitWidth();
        Value upperBits = rewriter.createOrFold<comb::ExtractOp>(
            op.getLoc(), adaptor.getLhs(), extractAmount,
            width - extractAmount);
        Value constZero = hw::ConstantOp::create(
            rewriter, op.getLoc(), APInt::getZero(extractAmount));
        replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(
            rewriter, op, op.getType(), ArrayRef<Value>{constZero, upperBits});
        return success();
      }

    return emulateBinaryOpForUnknownBits(
        rewriter, maxEmulationUnknownBits, op,
        [](const APInt &lhs, const APInt &rhs) {
          // Division by zero is undefined; return zero.
          if (rhs.isZero())
            return APInt::getZero(rhs.getBitWidth());
          return lhs.udiv(rhs);
        });
  }
};
// Lower comb::ModUOp. Modulo by a power of two keeps the low bits; other
// divisors are emulated via unknown-bit enumeration.
struct CombModUOpConversion : DivModOpConversionBase<ModUOp> {
  using DivModOpConversionBase<ModUOp>::DivModOpConversionBase;

  LogicalResult
  matchAndRewrite(ModUOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // umod(x, 2^n) == zext(x[n-1:0]).
    if (auto rhsConstantOp = adaptor.getRhs().getDefiningOp<hw::ConstantOp>())
      if (rhsConstantOp.getValue().isPowerOf2()) {
        size_t extractAmount = rhsConstantOp.getValue().ceilLogBase2();
        size_t width = op.getType().getIntOrFloatBitWidth();
        Value lowerBits = rewriter.createOrFold<comb::ExtractOp>(
            op.getLoc(), adaptor.getLhs(), 0, extractAmount);
        Value constZero = hw::ConstantOp::create(
            rewriter, op.getLoc(), APInt::getZero(width - extractAmount));
        replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(
            rewriter, op, op.getType(), ArrayRef<Value>{constZero, lowerBits});
        return success();
      }

    return emulateBinaryOpForUnknownBits(
        rewriter, maxEmulationUnknownBits, op,
        [](const APInt &lhs, const APInt &rhs) {
          // Modulo by zero is undefined; return zero.
          if (rhs.isZero())
            return APInt::getZero(rhs.getBitWidth());
          return lhs.urem(rhs);
        });
  }
};
// Lower comb::DivSOp purely via unknown-bit emulation.
struct CombDivSOpConversion : DivModOpConversionBase<DivSOp> {
  using DivModOpConversionBase<DivSOp>::DivModOpConversionBase;

  LogicalResult
  matchAndRewrite(DivSOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    return emulateBinaryOpForUnknownBits(
        rewriter, maxEmulationUnknownBits, op,
        [](const APInt &lhs, const APInt &rhs) {
          // Division by zero is undefined; return zero.
          if (rhs.isZero())
            return APInt::getZero(rhs.getBitWidth());
          return lhs.sdiv(rhs);
        });
  }
};

// Lower comb::ModSOp purely via unknown-bit emulation.
struct CombModSOpConversion : DivModOpConversionBase<ModSOp> {
  using DivModOpConversionBase<ModSOp>::DivModOpConversionBase;

  LogicalResult
  matchAndRewrite(ModSOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    return emulateBinaryOpForUnknownBits(
        rewriter, maxEmulationUnknownBits, op,
        [](const APInt &lhs, const APInt &rhs) {
          // Modulo by zero is undefined; return zero.
          if (rhs.isZero())
            return APInt::getZero(rhs.getBitWidth());
          return lhs.srem(rhs);
        });
  }
};
struct CombICmpOpConversion : OpConversionPattern<ICmpOp> {
  using OpConversionPattern<ICmpOp>::OpConversionPattern;

  // Construct a ripple "a < b" (or "a <= b" when includeEq) comparison,
  // scanning from LSB to MSB: at each bit the running result is kept when
  // the bits are equal and replaced by (~a_i & b_i) otherwise.
  static Value constructRippleCarry(Location loc, Value a, Value b,
                                    bool includeEq,
                                    ConversionPatternRewriter &rewriter) {
    auto aBits = extractBits(rewriter, a);
    auto bBits = extractBits(rewriter, b);
    // Seed with includeEq so that equal operands compare true for "<=".
    Value acc = hw::ConstantOp::create(rewriter, loc, APInt(1, includeEq));

    for (auto [aBit, bBit] : llvm::zip(aBits, bBits)) {
      auto aBitXorBBit =
          rewriter.createOrFold<comb::XorOp>(loc, aBit, bBit, true);
      auto aEqualB = rewriter.createOrFold<synth::aig::AndInverterOp>(
          loc, aBitXorBBit, true);
      // pred = ~a_i & b_i, i.e. a_i < b_i at this bit.
      auto pred = rewriter.createOrFold<synth::aig::AndInverterOp>(
          loc, aBit, bBit, true, false);

      auto aBitAndBBit = rewriter.createOrFold<comb::AndOp>(
          loc, ValueRange{aEqualB, acc}, true);
      acc = rewriter.createOrFold<comb::OrOp>(loc, pred, aBitAndBBit, true);
    }
    return acc;
  }
  // Compute a comparison with a parallel-prefix tree over per-bit equality
  // (pPrefix, acting as "propagate") and per-bit less-than (gPrefix, acting
  // as "generate") signals.
  static Value computePrefixComparison(ConversionPatternRewriter &rewriter,
                                       Location loc, SmallVector<Value> pPrefix,
                                       SmallVector<Value> gPrefix,
                                       bool includeEq, AdderArchitecture arch) {
    auto width = pPrefix.size();
    Value finalGroup, finalPropagate;
    switch (arch) {
    case AdderArchitecture::RippleCarry:
      llvm_unreachable("Ripple-Carry should be handled separately");
      break;
    case AdderArchitecture::Sklanskey: {
      lowerSklanskeyPrefixTree(rewriter, loc, pPrefix, gPrefix);
      finalGroup = gPrefix[width - 1];
      finalPropagate = pPrefix[width - 1];
      break;
    }
    case AdderArchitecture::KoggeStone:
      // Only the final position is needed, so build the tree lazily.
      std::tie(finalPropagate, finalGroup) =
          LazyKoggeStonePrefixTree(rewriter, loc, width, pPrefix, gPrefix)
              .getFinal(width - 1);
      break;
    case AdderArchitecture::BrentKung: {
      lowerBrentKungPrefixTree(rewriter, loc, pPrefix, gPrefix);
      finalGroup = gPrefix[width - 1];
      finalPropagate = pPrefix[width - 1];
      break;
    }
    }

    // The final "generate" is the strict comparison; for "<=", OR in the
    // final "propagate" (all bits equal).
    if (!includeEq)
      return finalGroup;
    return comb::OrOp::create(rewriter, loc, finalGroup, finalPropagate);
  }
  // Construct an unsigned comparison: "a < b", or "a <= b" when includeEq.
  static Value constructUnsignedCompare(Operation *op, Location loc, Value a,
                                        Value b, bool isLess, bool includeEq,
                                        ConversionPatternRewriter &rewriter) {
    // Normalize to a less-than: a > b iff b < a.
    if (!isLess)
      std::swap(a, b);

    auto width = a.getType().getIntOrFloatBitWidth();
    auto arch = determineAdderArch(op, width);
    if (arch == AdderArchitecture::RippleCarry)
      return constructRippleCarry(loc, a, b, includeEq, rewriter);

    // Per-bit equality (eq) and strictly-less (gt) signals feed the prefix
    // tree as propagate/generate.
    SmallVector<Value> eq, gt;
    eq.reserve(width);
    gt.reserve(width);
    auto one = hw::ConstantOp::create(rewriter, loc, APInt(1, 1));
    auto aBits = extractBits(rewriter, a);
    auto bBits = extractBits(rewriter, b);
    for (auto [aBit, bBit] : llvm::zip(aBits, bBits)) {
      // eq_i = ~(a_i ^ b_i)
      auto xorBit = comb::XorOp::create(rewriter, loc, aBit, bBit);
      eq.push_back(comb::XorOp::create(rewriter, loc, xorBit, one));
      // gt_i = ~a_i & b_i, i.e. a_i < b_i.
      auto notA = comb::XorOp::create(rewriter, loc, aBit, one);
      gt.push_back(comb::AndOp::create(rewriter, loc, notA, bBit));
    }

    return computePrefixComparison(rewriter, loc, std::move(eq), std::move(gt),
                                   includeEq, arch);
  }
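  // Treating per-bit equality as "propagate" and per-bit less-than as
  // "generate" lets the same prefix trees used for addition evaluate the
  // comparison in logarithmic depth.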
  LogicalResult
  matchAndRewrite(ICmpOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto lhs = adaptor.getLhs();
    auto rhs = adaptor.getRhs();

    switch (op.getPredicate()) {
    default:
      return failure();

    case ICmpPredicate::eq:
    case ICmpPredicate::ceq: {
      // a == b  ->  AND of ~(a ^ b) bits.
      auto xorOp = rewriter.createOrFold<comb::XorOp>(op.getLoc(), lhs, rhs);
      auto xorBits = extractBits(rewriter, xorOp);
      SmallVector<bool> allInverts(xorBits.size(), true);
      replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
          rewriter, op, xorBits, allInverts);
      return success();
    }

    case ICmpPredicate::ne:
    case ICmpPredicate::cne: {
      // a != b  ->  OR of (a ^ b) bits.
      auto xorOp = rewriter.createOrFold<comb::XorOp>(op.getLoc(), lhs, rhs);
      replaceOpWithNewOpAndCopyNamehint<comb::OrOp>(
          rewriter, op, extractBits(rewriter, xorOp), true);
      return success();
    }

    case ICmpPredicate::uge:
    case ICmpPredicate::ugt:
    case ICmpPredicate::ule:
    case ICmpPredicate::ult: {
      bool isLess = op.getPredicate() == ICmpPredicate::ult ||
                    op.getPredicate() == ICmpPredicate::ule;
      bool includeEq = op.getPredicate() == ICmpPredicate::uge ||
                       op.getPredicate() == ICmpPredicate::ule;
      replaceOpAndCopyNamehint(rewriter, op,
                               constructUnsignedCompare(op, op.getLoc(), lhs,
                                                        rhs, isLess, includeEq,
                                                        rewriter));
      return success();
    }

    case ICmpPredicate::slt:
    case ICmpPredicate::sle:
    case ICmpPredicate::sgt:
    case ICmpPredicate::sge: {
      if (lhs.getType().getIntOrFloatBitWidth() == 0)
        return rewriter.notifyMatchFailure(
            op.getLoc(), "i0 signed comparison is unsupported");
      bool isLess = op.getPredicate() == ICmpPredicate::slt ||
                    op.getPredicate() == ICmpPredicate::sle;
      bool includeEq = op.getPredicate() == ICmpPredicate::sge ||
                       op.getPredicate() == ICmpPredicate::sle;

      // Split off the sign bits and compare the remaining bits as unsigned.
      auto signA = extractMSB(rewriter, lhs);
      auto signB = extractMSB(rewriter, rhs);
      auto aRest = extractOtherThanMSB(rewriter, lhs);
      auto bRest = extractOtherThanMSB(rewriter, rhs);

      auto sameSignResult = constructUnsignedCompare(
          op, op.getLoc(), aRest, bRest, isLess, includeEq, rewriter);

      // If the signs differ, the negative operand is the smaller one.
      Value signsDiffer =
          comb::XorOp::create(rewriter, op.getLoc(), signA, signB);
      Value diffSignResult = isLess ? signA : signB;

      replaceOpWithNewOpAndCopyNamehint<comb::MuxOp>(
          rewriter, op, signsDiffer, diffSignResult, sameSignResult);
      return success();
    }
    }
  }
};
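// E.g. for slt on i8: when sign(a) != sign(b) the result is simply sign(a)
// (a is negative, hence smaller); otherwise the low 7 bits decide via the
// unsigned comparison.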
// Lower comb::ParityOp to an XOR reduction over the individual bits.
struct CombParityOpConversion : OpConversionPattern<ParityOp> {
  using OpConversionPattern<ParityOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(ParityOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    replaceOpWithNewOpAndCopyNamehint<comb::XorOp>(
        rewriter, op, extractBits(rewriter, adaptor.getInput()), true);
    return success();
  }
};
// Lower comb::ShlOp to a mux tree over the shift amount.
struct CombShlOpConversion : OpConversionPattern<comb::ShlOp> {
  using OpConversionPattern<comb::ShlOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(comb::ShlOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto width = op.getType().getIntOrFloatBitWidth();
    auto lhs = adaptor.getLhs();
    auto result = createShiftLogic</*isLeftShift=*/true>(
        rewriter, op.getLoc(), adaptor.getRhs(), width,
        /*getPadding=*/
        [&](int64_t index) -> Value {
          // The low `index` bits become zero; return a null value instead of
          // an i0 constant when no padding is needed.
          if (index == 0)
            return {};
          return hw::ConstantOp::create(
              rewriter, op.getLoc(), rewriter.getIntegerType(index), 0);
        },
        /*getExtract=*/
        [&](int64_t index) -> Value {
          assert(index < width && "index out of bounds");
          // Keep the low (width - index) bits of the input.
          return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, 0,
                                                        width - index);
        });
    replaceOpAndCopyNamehint(rewriter, op, result);
    return success();
  }
};
// Lower comb::ShrUOp to a mux tree over the shift amount.
struct CombShrUOpConversion : OpConversionPattern<comb::ShrUOp> {
  using OpConversionPattern<comb::ShrUOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(comb::ShrUOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto width = op.getType().getIntOrFloatBitWidth();
    auto lhs = adaptor.getLhs();
    auto result = createShiftLogic</*isLeftShift=*/false>(
        rewriter, op.getLoc(), adaptor.getRhs(), width,
        /*getPadding=*/
        [&](int64_t index) -> Value {
          // The high `index` bits become zero; return a null value instead of
          // an i0 constant when no padding is needed.
          if (index == 0)
            return {};
          return hw::ConstantOp::create(
              rewriter, op.getLoc(), rewriter.getIntegerType(index), 0);
        },
        /*getExtract=*/
        [&](int64_t index) -> Value {
          assert(index < width && "index out of bounds");
          // Keep the high (width - index) bits of the input.
          return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, index,
                                                        width - index);
        });
    replaceOpAndCopyNamehint(rewriter, op, result);
    return success();
  }
};
// Lower comb::ShrSOp to a mux tree that replicates the sign bit as padding.
struct CombShrSOpConversion : OpConversionPattern<comb::ShrSOp> {
  using OpConversionPattern<comb::ShrSOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(comb::ShrSOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto width = op.getType().getIntOrFloatBitWidth();
    if (width == 0)
      return rewriter.notifyMatchFailure(op.getLoc(),
                                         "i0 signed shift is unsupported");
    auto lhs = adaptor.getLhs();
    // The sign bit provides the padding.
    auto sign =
        rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, width - 1, 1);
    // The max shift amount is width - 1; any larger amount replicates the
    // sign across the whole result.
    auto result = createShiftLogic</*isLeftShift=*/false>(
        rewriter, op.getLoc(), adaptor.getRhs(), width - 1,
        /*getPadding=*/
        [&](int64_t index) {
          return rewriter.createOrFold<comb::ReplicateOp>(op.getLoc(), sign,
                                                          index + 1);
        },
        /*getExtract=*/
        [&](int64_t index) {
          return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, index,
                                                        width - index - 1);
        });
    replaceOpAndCopyNamehint(rewriter, op, result);
    return success();
  }
};
struct ConvertCombToSynthPass
    : public impl::ConvertCombToSynthBase<ConvertCombToSynthPass> {
  void runOnOperation() override;
  using ConvertCombToSynthBase<ConvertCombToSynthPass>::ConvertCombToSynthBase;
};
static void
populateCombToAIGConversionPatterns(RewritePatternSet &patterns,
                                    uint32_t maxEmulationUnknownBits,
                                    bool lowerToMIG) {
  patterns.add<
      // Bitwise logical ops.
      CombAndOpConversion, CombXorOpConversion, CombMuxOpConversion,
      CombParityOpConversion,
      // Arithmetic ops.
      CombMulOpConversion, CombICmpOpConversion,
      // Shift ops.
      CombShlOpConversion, CombShrUOpConversion, CombShrSOpConversion,
      // Variadic ops.
      CombLowerVariadicOp<XorOp>, CombLowerVariadicOp<AddOp>,
      CombLowerVariadicOp<MulOp>>(patterns.getContext());

  // comb.sub is rewritten into comb.add first.
  patterns.add(comb::convertSubToAdd);

  if (lowerToMIG)
    patterns.add<CombOrToMIGConversion, CombLowerVariadicOp<OrOp>,
                 AndInverterToMIGConversion,
                 CombAddOpConversion</*lowerToMIG=*/true>>(
        patterns.getContext());
  else
    patterns.add<CombOrToAIGConversion,
                 CombAddOpConversion</*lowerToMIG=*/false>>(
        patterns.getContext());

  patterns.add<CombDivUOpConversion, CombModUOpConversion,
               CombDivSOpConversion, CombModSOpConversion>(
      patterns.getContext(), maxEmulationUnknownBits);
}
void ConvertCombToSynthPass::runOnOperation() {
  ConversionTarget target(getContext());
  // Comb must be fully lowered.
  target.addIllegalDialect<comb::CombDialect>();
  // Keep data movement ops such as Extract, Concat and Replicate, plus
  // constants and bitcasts.
  target.addLegalOp<comb::ExtractOp, comb::ConcatOp, comb::ReplicateOp,
                    hw::BitcastOp, hw::ConstantOp, hw::WireOp,
                    hw::AggregateConstantOp>();
  target.addLegalDialect<synth::SynthDialect>();

  if (targetIR == CombToSynthTargetIR::AIG) {
    target.addIllegalOp<synth::mig::MajorityInverterOp>();
  } else if (targetIR == CombToSynthTargetIR::MIG) {
    target.addIllegalOp<synth::aig::AndInverterOp>();
  }

  // Test-only option to keep specific ops legal.
  if (!additionalLegalOps.empty())
    for (const auto &opName : additionalLegalOps)
      target.addLegalOp(OperationName(opName, &getContext()));

  RewritePatternSet patterns(&getContext());
  populateCombToAIGConversionPatterns(patterns, maxEmulationUnknownBits,
                                      targetIR == CombToSynthTargetIR::MIG);

  if (failed(mlir::applyPartialConversion(getOperation(), target,
                                          std::move(patterns))))
    return signalPassFailure();
}
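// Note this is a partial conversion: ops named in additionalLegalOps are left
// untouched, while any remaining illegal comb op causes the pass to fail.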