CIRCT 22.0.0git
CombToSynth.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the main Comb to Synth Conversion Pass Implementation.
10//
11// High-level Comb Operations
12// | |
13// v |
14// +-------------------+ |
15// | and, or, xor, mux | |
16// +---------+---------+ |
17// | |
18// +-------+--------+ |
19// v v v
20// +-----+ +-----+
21// | AIG |-------->| MIG |
22// +-----+ +-----+
23//
24//===----------------------------------------------------------------------===//
25
33#include "mlir/Pass/Pass.h"
34#include "mlir/Transforms/DialectConversion.h"
35#include "llvm/ADT/APInt.h"
36#include "llvm/ADT/PointerUnion.h"
37#include "llvm/Support/Debug.h"
38#include <array>
39
40#define DEBUG_TYPE "comb-to-synth"
41
42namespace circt {
43#define GEN_PASS_DEF_CONVERTCOMBTOSYNTH
44#include "circt/Conversion/Passes.h.inc"
45} // namespace circt
46
47using namespace circt;
48using namespace comb;
49
50//===----------------------------------------------------------------------===//
51// Utility Functions
52//===----------------------------------------------------------------------===//
53
54// A wrapper for comb::extractBits that returns a SmallVector<Value>.
55static SmallVector<Value> extractBits(OpBuilder &builder, Value val) {
56 SmallVector<Value> bits;
57 comb::extractBits(builder, val, bits);
58 return bits;
59}
60
61// Construct a mux tree for shift operations. `isLeftShift` controls the
62// direction of the shift operation and is used to determine order of the
63// padding and extracted bits. Callbacks `getPadding` and `getExtract` are used
64// to get the padding and extracted bits for each shift amount. `getPadding`
65// may return a null Value (i0 padding), but otherwise both callbacks must return
66// a valid value for every shift amount in the range [0, maxShiftAmount].
67// The padding returned for `maxShiftAmount` is used as the out-of-bounds value.
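// Illustrative example: for comb.shl over i4, nodes[s] = concat(lhs[3-s:0], s
// zero bits) = lhs << s for s = 0..3, and any shift amount >= 4 falls through the
// final bounds-check mux to the all-zero value returned by getPadding(4).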
68template <bool isLeftShift>
69static Value createShiftLogic(ConversionPatternRewriter &rewriter, Location loc,
70 Value shiftAmount, int64_t maxShiftAmount,
71 llvm::function_ref<Value(int64_t)> getPadding,
72 llvm::function_ref<Value(int64_t)> getExtract) {
73 // Extract individual bits from shift amount
74 auto bits = extractBits(rewriter, shiftAmount);
75
76 // Create nodes for each possible shift amount
77 SmallVector<Value> nodes;
78 nodes.reserve(maxShiftAmount);
79 for (int64_t i = 0; i < maxShiftAmount; ++i) {
80 Value extract = getExtract(i);
81 Value padding = getPadding(i);
82
83 if (!padding) {
84 nodes.push_back(extract);
85 continue;
86 }
87
88 // Concatenate extracted bits with padding
89 if (isLeftShift)
90 nodes.push_back(
91 rewriter.createOrFold<comb::ConcatOp>(loc, extract, padding));
92 else
93 nodes.push_back(
94 rewriter.createOrFold<comb::ConcatOp>(loc, padding, extract));
95 }
96
97 // Create out-of-bounds value
98 auto outOfBoundsValue = getPadding(maxShiftAmount);
99 assert(outOfBoundsValue && "outOfBoundsValue must be valid");
100
101 // Construct mux tree for shift operation
102 auto result =
103 comb::constructMuxTree(rewriter, loc, bits, nodes, outOfBoundsValue);
104
105 // Add bounds checking
106 auto inBound = rewriter.createOrFold<comb::ICmpOp>(
107 loc, ICmpPredicate::ult, shiftAmount,
108 hw::ConstantOp::create(rewriter, loc, shiftAmount.getType(),
109 maxShiftAmount));
110
111 return rewriter.createOrFold<comb::MuxOp>(loc, inBound, result,
112 outOfBoundsValue);
113}
114
115// Return a majority operation if MIG is enabled, otherwise return a majority
116// function implemented with Comb operations. In the Comb implementation, `carry`
117// passes through slightly less logic than the other inputs.
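// For reference: maj(a, b, c) = 1 iff at least two inputs are 1. The Comb
// expression matches it because when a == b the result is a & b = a, and when
// a != b the result is c & (a ^ b) = c.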
118static Value createMajorityFunction(OpBuilder &rewriter, Location loc, Value a,
119 Value b, Value carry,
120 bool useMajorityInverterOp) {
121 if (useMajorityInverterOp) {
122 std::array<Value, 3> inputs = {a, b, carry};
123 std::array<bool, 3> inverts = {false, false, false};
124 return synth::mig::MajorityInverterOp::create(rewriter, loc, inputs,
125 inverts);
126 }
127
128 // maj(a, b, c) = (c & (a ^ b)) | (a & b)
129 auto aXorB = comb::XorOp::create(rewriter, loc, ValueRange{a, b}, true);
130 auto andOp =
131 comb::AndOp::create(rewriter, loc, ValueRange{carry, aXorB}, true);
132 auto aAndB = comb::AndOp::create(rewriter, loc, ValueRange{a, b}, true);
133 return comb::OrOp::create(rewriter, loc, ValueRange{andOp, aAndB}, true);
134}
135
136static Value extractMSB(OpBuilder &builder, Value val) {
137 return builder.createOrFold<comb::ExtractOp>(
138 val.getLoc(), val, val.getType().getIntOrFloatBitWidth() - 1, 1);
139}
140
141static Value extractOtherThanMSB(OpBuilder &builder, Value val) {
142 return builder.createOrFold<comb::ExtractOp>(
143 val.getLoc(), val, 0, val.getType().getIntOrFloatBitWidth() - 1);
144}
145
146namespace {
147// A union of Value and IntegerAttr to cleanly handle constant values.
148using ConstantOrValue = llvm::PointerUnion<Value, mlir::IntegerAttr>;
149} // namespace
150
151// Return the number of unknown bits and populate the concatenated values.
152static int64_t getNumUnknownBitsAndPopulateValues(
153 Value value, llvm::SmallVectorImpl<ConstantOrValue> &values) {
154 // Constants and zero-width values have no unknown bits.
155 if (value.getType().isInteger(0))
156 return 0;
157
158 // Recursively count unknown bits for concat.
159 if (auto concat = value.getDefiningOp<comb::ConcatOp>()) {
160 int64_t totalUnknownBits = 0;
161 for (auto concatInput : llvm::reverse(concat.getInputs())) {
162 auto unknownBits =
163 getNumUnknownBitsAndPopulateValues(concatInput, values);
164 if (unknownBits < 0)
165 return unknownBits;
166 totalUnknownBits += unknownBits;
167 }
168 return totalUnknownBits;
169 }
170
171 // Constant value is known.
172 if (auto constant = value.getDefiningOp<hw::ConstantOp>()) {
173 values.push_back(constant.getValueAttr());
174 return 0;
175 }
176
177 // Consider other operations as unknown bits.
178 // TODO: We can handle replicate, extract, etc.
179 values.push_back(value);
180 return hw::getBitWidth(value.getType());
181}
182
183// Return a value that substitutes the unknown bits with the mask.
184static APInt
185substitueMaskToValues(size_t width,
186 llvm::SmallVectorImpl<ConstantOrValue> &constantOrValues,
187 uint32_t mask) {
188 uint32_t bitPos = 0, unknownPos = 0;
189 APInt result(width, 0);
190 for (auto constantOrValue : constantOrValues) {
191 int64_t elemWidth;
192 if (auto constant = dyn_cast<IntegerAttr>(constantOrValue)) {
193 elemWidth = constant.getValue().getBitWidth();
194 result.insertBits(constant.getValue(), bitPos);
195 } else {
196 elemWidth = hw::getBitWidth(cast<Value>(constantOrValue).getType());
197 assert(elemWidth >= 0 && "unknown bit width");
198 assert(elemWidth + unknownPos < 32 && "unknown bit width too large");
199 // Create a mask for the unknown bits.
200 uint32_t usedBits = (mask >> unknownPos) & ((1 << elemWidth) - 1);
201 result.insertBits(APInt(elemWidth, usedBits), bitPos);
202 unknownPos += elemWidth;
203 }
204 bitPos += elemWidth;
205 }
206
207 return result;
208}
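// Worked example (illustrative): for value = concat(x : i2, c : i3) where c is
// the constant 0b101 and x is defined by a non-constant op, the populated list is
// [IntegerAttr(3'b101), x] (concat inputs are visited LSB-first) and the number of
// unknown bits is 2. substitueMaskToValues(5, values, mask) then rebuilds a 5-bit
// constant whose unknown bits come from `mask`, e.g. mask = 0b10 gives 0b10101.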
209
210// Emulate a binary operation with unknown bits using a table lookup.
211// This function enumerates all possible combinations of unknown bits and
212// emulates the operation for each combination.
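// For example, with k total unknown bits the emulation materializes 2^k result
// constants and selects among them with a mux tree driven by the k unknown bits,
// so `maxEmulationUnknownBits` directly bounds the size of this table.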
213static LogicalResult emulateBinaryOpForUnknownBits(
214 ConversionPatternRewriter &rewriter, int64_t maxEmulationUnknownBits,
215 Operation *op,
216 llvm::function_ref<APInt(const APInt &, const APInt &)> emulate) {
217 SmallVector<ConstantOrValue> lhsValues, rhsValues;
218
219 assert(op->getNumResults() == 1 && op->getNumOperands() == 2 &&
220 "op must be a single result binary operation");
221
222 auto lhs = op->getOperand(0);
223 auto rhs = op->getOperand(1);
224 auto width = op->getResult(0).getType().getIntOrFloatBitWidth();
225 auto loc = op->getLoc();
226 auto numLhsUnknownBits = getNumUnknownBitsAndPopulateValues(lhs, lhsValues);
227 auto numRhsUnknownBits = getNumUnknownBitsAndPopulateValues(rhs, rhsValues);
228
229 // If unknown bit width is detected, abort the lowering.
230 if (numLhsUnknownBits < 0 || numRhsUnknownBits < 0)
231 return failure();
232
233 int64_t totalUnknownBits = numLhsUnknownBits + numRhsUnknownBits;
234 if (totalUnknownBits > maxEmulationUnknownBits)
235 return failure();
236
237 SmallVector<Value> emulatedResults;
238 emulatedResults.reserve(1 << totalUnknownBits);
239
240 // Emulate all possible cases.
241 DenseMap<IntegerAttr, hw::ConstantOp> constantPool;
242 auto getConstant = [&](const APInt &value) -> hw::ConstantOp {
243 auto attr = rewriter.getIntegerAttr(rewriter.getIntegerType(width), value);
244 auto it = constantPool.find(attr);
245 if (it != constantPool.end())
246 return it->second;
247 auto constant = hw::ConstantOp::create(rewriter, loc, value);
248 constantPool[attr] = constant;
249 return constant;
250 };
251
252 for (uint32_t lhsMask = 0, lhsMaskEnd = 1 << numLhsUnknownBits;
253 lhsMask < lhsMaskEnd; ++lhsMask) {
254 APInt lhsValue = substitueMaskToValues(width, lhsValues, lhsMask);
255 for (uint32_t rhsMask = 0, rhsMaskEnd = 1 << numRhsUnknownBits;
256 rhsMask < rhsMaskEnd; ++rhsMask) {
257 APInt rhsValue = substitueMaskToValues(width, rhsValues, rhsMask);
258 // Emulate.
259 emulatedResults.push_back(getConstant(emulate(lhsValue, rhsValue)));
260 }
261 }
262
263 // Create selectors for mux tree.
264 SmallVector<Value> selectors;
265 selectors.reserve(totalUnknownBits);
266 for (auto &concatedValues : {rhsValues, lhsValues})
267 for (auto valueOrConstant : concatedValues) {
268 auto value = dyn_cast<Value>(valueOrConstant);
269 if (!value)
270 continue;
271 extractBits(rewriter, value, selectors);
272 }
273
274 assert(totalUnknownBits == static_cast<int64_t>(selectors.size()) &&
275 "number of selectors must match");
276 auto muxed = constructMuxTree(rewriter, loc, selectors, emulatedResults,
277 getConstant(APInt::getZero(width)));
278
279 replaceOpAndCopyNamehint(rewriter, op, muxed);
280 return success();
281}
282
283//===----------------------------------------------------------------------===//
284// Conversion patterns
285//===----------------------------------------------------------------------===//
286
287namespace {
288
289/// Lower a comb::AndOp operation to synth::aig::AndInverterOp
290struct CombAndOpConversion : OpConversionPattern<AndOp> {
291 using OpConversionPattern<AndOp>::OpConversionPattern;
292
293 LogicalResult
294 matchAndRewrite(AndOp op, OpAdaptor adaptor,
295 ConversionPatternRewriter &rewriter) const override {
296 SmallVector<bool> nonInverts(adaptor.getInputs().size(), false);
297 replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
298 rewriter, op, adaptor.getInputs(), nonInverts);
299 return success();
300 }
301};
302
303/// Lower a comb::OrOp operation to synth::aig::AndInverterOp with invert flags
304struct CombOrToAIGConversion : OpConversionPattern<OrOp> {
305 using OpConversionPattern<OrOp>::OpConversionPattern;
306
307 LogicalResult
308 matchAndRewrite(OrOp op, OpAdaptor adaptor,
309 ConversionPatternRewriter &rewriter) const override {
310 // Implement Or using And and invert flags: a | b = ~(~a & ~b)
311 SmallVector<bool> allInverts(adaptor.getInputs().size(), true);
312 auto andOp = synth::aig::AndInverterOp::create(
313 rewriter, op.getLoc(), adaptor.getInputs(), allInverts);
314 replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
315 rewriter, op, andOp,
316 /*invert=*/true);
317 return success();
318 }
319};
320
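// The MIG lowerings below rely on the majority identities maj(a, b, 1) = a | b
// and maj(a, b, 0) = a & b: an OR appends an all-ones constant and an
// and-inverter appends an all-zeros constant as the third majority input.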
321struct CombOrToMIGConversion : OpConversionPattern<OrOp> {
322 using OpConversionPattern<OrOp>::OpConversionPattern;
323 LogicalResult
324 matchAndRewrite(OrOp op, OpAdaptor adaptor,
325 ConversionPatternRewriter &rewriter) const override {
326 if (op.getNumOperands() != 2)
327 return failure();
328 SmallVector<Value, 3> inputs(adaptor.getInputs());
329 auto one = hw::ConstantOp::create(
330 rewriter, op.getLoc(),
331 APInt::getAllOnes(hw::getBitWidth(op.getType())));
332 inputs.push_back(one);
333 std::array<bool, 3> inverts = {false, false, false};
334 replaceOpWithNewOpAndCopyNamehint<synth::mig::MajorityInverterOp>(
335 rewriter, op, inputs, inverts);
336 return success();
337 }
338};
339
340struct AndInverterToMIGConversion
341 : OpConversionPattern<synth::aig::AndInverterOp> {
342 using OpConversionPattern<synth::aig::AndInverterOp>::OpConversionPattern;
343 LogicalResult
344 matchAndRewrite(synth::aig::AndInverterOp op, OpAdaptor adaptor,
345 ConversionPatternRewriter &rewriter) const override {
346 if (op.getNumOperands() > 2)
347 return failure();
348 if (op.getNumOperands() == 1) {
349 SmallVector<bool, 1> inverts{op.getInverted()[0]};
350 replaceOpWithNewOpAndCopyNamehint<synth::mig::MajorityInverterOp>(
351 rewriter, op, adaptor.getInputs(), inverts);
352 return success();
353 }
354 SmallVector<Value, 3> inputs(adaptor.getInputs());
355 auto zero = hw::ConstantOp::create(
356 rewriter, op.getLoc(), APInt::getZero(hw::getBitWidth(op.getType())));
357 inputs.push_back(zero);
358 SmallVector<bool, 3> inverts(adaptor.getInverted());
359 inverts.push_back(false);
360 replaceOpWithNewOpAndCopyNamehint<synth::mig::MajorityInverterOp>(
361 rewriter, op, inputs, inverts);
362 return success();
363 }
364};
365
366/// Lower a comb::XorOp operation to AIG operations
367struct CombXorOpConversion : OpConversionPattern<XorOp> {
368 using OpConversionPattern<XorOp>::OpConversionPattern;
369
370 LogicalResult
371 matchAndRewrite(XorOp op, OpAdaptor adaptor,
372 ConversionPatternRewriter &rewriter) const override {
373 if (op.getNumOperands() != 2)
374 return failure();
375 // Xor using And with invert flags: a ^ b = (a | b) & (~a | ~b)
376
377 // (a | b) = ~(~a & ~b)
378 // (~a | ~b) = ~(a & b)
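// Expanding the two identities above: a ^ b = ~(~a & ~b) & ~(a & b), which maps
// onto the single AndInverterOp below with both operands inverted.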
379 auto inputs = adaptor.getInputs();
380 SmallVector<bool> allInverts(inputs.size(), true);
381 SmallVector<bool> allNotInverts(inputs.size(), false);
382
383 auto notAAndNotB = synth::aig::AndInverterOp::create(rewriter, op.getLoc(),
384 inputs, allInverts);
385 auto aAndB = synth::aig::AndInverterOp::create(rewriter, op.getLoc(),
386 inputs, allNotInverts);
387
388 replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
389 rewriter, op, notAAndNotB, aAndB,
390 /*lhs_invert=*/true,
391 /*rhs_invert=*/true);
392 return success();
393 }
394};
395
396template <typename OpTy>
397struct CombLowerVariadicOp : OpConversionPattern<OpTy> {
398 using OpConversionPattern<OpTy>::OpConversionPattern;
399 using OpAdaptor = typename OpConversionPattern<OpTy>::OpAdaptor;
400 LogicalResult
401 matchAndRewrite(OpTy op, OpAdaptor adaptor,
402 ConversionPatternRewriter &rewriter) const override {
403 auto result = lowerFullyAssociativeOp(op, op.getOperands(), rewriter);
404 replaceOpAndCopyNamehint(rewriter, op, result);
405 return success();
406 }
407
408 static Value lowerFullyAssociativeOp(OpTy op, OperandRange operands,
409 ConversionPatternRewriter &rewriter) {
410 Value lhs, rhs;
411 switch (operands.size()) {
412 case 0:
413 llvm_unreachable("cannot be called with empty operand range");
414 break;
415 case 1:
416 return operands[0];
417 case 2:
418 lhs = operands[0];
419 rhs = operands[1];
420 return OpTy::create(rewriter, op.getLoc(), ValueRange{lhs, rhs}, true);
421 default:
422 auto firstHalf = operands.size() / 2;
423 lhs =
424 lowerFullyAssociativeOp(op, operands.take_front(firstHalf), rewriter);
425 rhs =
426 lowerFullyAssociativeOp(op, operands.drop_front(firstHalf), rewriter);
427 return OpTy::create(rewriter, op.getLoc(), ValueRange{lhs, rhs}, true);
428 }
429 }
430};
431
432// Lower comb::MuxOp to AIG operations.
433struct CombMuxOpConversion : OpConversionPattern<MuxOp> {
434 using OpConversionPattern<MuxOp>::OpConversionPattern;
435
436 LogicalResult
437 matchAndRewrite(MuxOp op, OpAdaptor adaptor,
438 ConversionPatternRewriter &rewriter) const override {
439 Value cond = op.getCond();
440 auto trueVal = op.getTrueValue();
441 auto falseVal = op.getFalseValue();
442
443 if (!op.getType().isInteger()) {
444 // If the type of the mux is not integer, bitcast the operands first.
445 auto widthType = rewriter.getIntegerType(hw::getBitWidth(op.getType()));
446 trueVal =
447 hw::BitcastOp::create(rewriter, op->getLoc(), widthType, trueVal);
448 falseVal =
449 hw::BitcastOp::create(rewriter, op->getLoc(), widthType, falseVal);
450 }
451
452 // Replicate condition if needed
453 if (!trueVal.getType().isInteger(1))
454 cond = comb::ReplicateOp::create(rewriter, op.getLoc(), trueVal.getType(),
455 cond);
456
457 // c ? a : b => (replicate(c) & a) | (~replicate(c) & b)
458 auto lhs =
459 synth::aig::AndInverterOp::create(rewriter, op.getLoc(), cond, trueVal);
460 auto rhs = synth::aig::AndInverterOp::create(rewriter, op.getLoc(), cond,
461 falseVal, true, false);
462
463 Value result = comb::OrOp::create(rewriter, op.getLoc(), lhs, rhs);
464 // Insert the bitcast if the type of the mux is not integer.
465 if (result.getType() != op.getType())
466 result =
467 hw::BitcastOp::create(rewriter, op.getLoc(), op.getType(), result);
468 replaceOpAndCopyNamehint(rewriter, op, result);
469 return success();
470 }
471};
472
473//===----------------------------------------------------------------------===//
474// Adder Architecture Selection
475//===----------------------------------------------------------------------===//
476
477enum AdderArchitecture { RippleCarry, Sklanskey, KoggeStone, BrentKung };
478AdderArchitecture determineAdderArch(Operation *op, int64_t width) {
479 auto strAttr = op->getAttrOfType<StringAttr>("synth.test.arch");
480 if (strAttr) {
481 return llvm::StringSwitch<AdderArchitecture>(strAttr.getValue())
482 .Case("SKLANSKEY", Sklanskey)
483 .Case("KOGGE-STONE", KoggeStone)
484 .Case("BRENT-KUNG", BrentKung)
485 .Case("RIPPLE-CARRY", RippleCarry);
486 }
487 // Determine using width as a heuristic.
488 // TODO: Perform a more thorough analysis to motivate the choices or
489 // implement an adder synthesis algorithm to construct an optimal adder
490 // under the given timing constraints - see the work of Zimmermann
491
492 // For very small adders, the overhead of a parallel prefix adder is likely not
493 // worth it.
494 if (width < 8)
495 return AdderArchitecture::RippleCarry;
496
497 // Sklanskey is a good compromise for high-performance, but has high fanout
498 // which may lead to wiring congestion for very large adders.
499 if (width <= 32)
500 return AdderArchitecture::Sklanskey;
501
502 // Kogge-Stone uses greater area than Sklanskey but has lower fanout and thus
503 // may be preferable for larger adders.
504 return AdderArchitecture::KoggeStone;
505}
506
507//===----------------------------------------------------------------------===//
508// Parallel Prefix Tree
509//===----------------------------------------------------------------------===//
510
511// Implement the Kogge-Stone parallel prefix tree
512// Described in https://en.wikipedia.org/wiki/Kogge%E2%80%93Stone_adder
513// Slightly better delay than Brent-Kung, but more area.
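// For a width-w input (w > 1) this builds ceil(log2 w) stages; at the stage with
// stride s every bit i >= s combines its running (P, G) pair with the pair of bit
// i - s, so after the last stage gPrefix[i] holds the group generate over bits
// [0, i] and pPrefix[i] the corresponding group propagate.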
514void lowerKoggeStonePrefixTree(OpBuilder &builder, Location loc,
515 SmallVector<Value> &pPrefix,
516 SmallVector<Value> &gPrefix) {
517
518 auto width = static_cast<int64_t>(pPrefix.size());
519 assert(width == static_cast<int64_t>(gPrefix.size()));
520 SmallVector<Value> pPrefixNew = pPrefix;
521 SmallVector<Value> gPrefixNew = gPrefix;
522
523 // Kogge-Stone parallel prefix computation
524 for (int64_t stride = 1; stride < width; stride *= 2) {
525
526 for (int64_t i = stride; i < width; ++i) {
527 int64_t j = i - stride;
528
529 // Group generate: g_i OR (p_i AND g_j)
530 Value andPG = comb::AndOp::create(builder, loc, pPrefix[i], gPrefix[j]);
531 gPrefixNew[i] = comb::OrOp::create(builder, loc, gPrefix[i], andPG);
532
533 // Group propagate: p_i AND p_j
534 pPrefixNew[i] = comb::AndOp::create(builder, loc, pPrefix[i], pPrefix[j]);
535 }
536
537 pPrefix = pPrefixNew;
538 gPrefix = gPrefixNew;
539 }
540
541 LLVM_DEBUG({
542 int64_t stage = 0;
543 for (int64_t stride = 1; stride < width; stride *= 2) {
544 llvm::dbgs()
545 << "--------------------------------------- Kogge-Stone Stage "
546 << stage << "\n";
547 for (int64_t i = stride; i < width; ++i) {
548 int64_t j = i - stride;
549 // Group generate: g_i OR (p_i AND g_j)
550 llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
551 << " OR (P" << i << stage << " AND G" << j << stage
552 << ")\n";
553
554 // Group propagate: p_i AND p_j
555 llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
556 << " AND P" << j << stage << "\n";
557 }
558 ++stage;
559 }
560 });
561}
562
563// Implement the Sklansky parallel prefix tree
564// High fan-out, low depth, low area
565void lowerSklanskeyPrefixTree(OpBuilder &builder, Location loc,
566 SmallVector<Value> &pPrefix,
567 SmallVector<Value> &gPrefix) {
568 auto width = static_cast<int64_t>(pPrefix.size());
569 assert(width == static_cast<int64_t>(gPrefix.size()));
570 SmallVector<Value> pPrefixNew = pPrefix;
571 SmallVector<Value> gPrefixNew = gPrefix;
572 for (int64_t stride = 1; stride < width; stride *= 2) {
573 for (int64_t i = stride; i < width; i += 2 * stride) {
574 for (int64_t k = 0; k < stride && i + k < width; ++k) {
575 int64_t idx = i + k;
576 int64_t j = i - 1;
577
578 // Group generate: g_idx OR (p_idx AND g_j)
579 Value andPG =
580 comb::AndOp::create(builder, loc, pPrefix[idx], gPrefix[j]);
581 gPrefixNew[idx] = comb::OrOp::create(builder, loc, gPrefix[idx], andPG);
582
583 // Group propagate: p_idx AND p_j
584 pPrefixNew[idx] =
585 comb::AndOp::create(builder, loc, pPrefix[idx], pPrefix[j]);
586 }
587 }
588
589 pPrefix = pPrefixNew;
590 gPrefix = gPrefixNew;
591 }
592
593 LLVM_DEBUG({
594 int64_t stage = 0;
595 for (int64_t stride = 1; stride < width; stride *= 2) {
596 llvm::dbgs() << "--------------------------------------- Sklanskey Stage "
597 << stage << "\n";
598 for (int64_t i = stride; i < width; i += 2 * stride) {
599 for (int64_t k = 0; k < stride && i + k < width; ++k) {
600 int64_t idx = i + k;
601 int64_t j = i - 1;
602 // Group generate: g_i OR (p_i AND g_j)
603 llvm::dbgs() << "G" << idx << stage + 1 << " = G" << idx << stage
604 << " OR (P" << idx << stage << " AND G" << j << stage
605 << ")\n";
606
607 // Group propagate: p_i AND p_j
608 llvm::dbgs() << "P" << idx << stage + 1 << " = P" << idx << stage
609 << " AND P" << j << stage << "\n";
610 }
611 }
612 ++stage;
613 }
614 });
615}
616
617// Implement the Brent-Kung parallel prefix tree
618// Described in https://en.wikipedia.org/wiki/Brent%E2%80%93Kung_adder
619// Slightly worse delay than Kogge-Stone, but less area.
620void lowerBrentKungPrefixTree(OpBuilder &builder, Location loc,
621 SmallVector<Value> &pPrefix,
622 SmallVector<Value> &gPrefix) {
623 auto width = static_cast<int64_t>(pPrefix.size());
624 assert(width == static_cast<int64_t>(gPrefix.size()));
625 SmallVector<Value> pPrefixNew = pPrefix;
626 SmallVector<Value> gPrefixNew = gPrefix;
627 // Brent-Kung parallel prefix computation
628 // Forward phase
629 int64_t stride;
630 for (stride = 1; stride < width; stride *= 2) {
631 for (int64_t i = stride * 2 - 1; i < width; i += stride * 2) {
632 int64_t j = i - stride;
633
634 // Group generate: g_i OR (p_i AND g_j)
635 Value andPG = comb::AndOp::create(builder, loc, pPrefix[i], gPrefix[j]);
636 gPrefixNew[i] = comb::OrOp::create(builder, loc, gPrefix[i], andPG);
637
638 // Group propagate: p_i AND p_j
639 pPrefixNew[i] = comb::AndOp::create(builder, loc, pPrefix[i], pPrefix[j]);
640 }
641 pPrefix = pPrefixNew;
642 gPrefix = gPrefixNew;
643 }
644
645 // Backward phase
646 for (; stride > 0; stride /= 2) {
647 for (int64_t i = stride * 3 - 1; i < width; i += stride * 2) {
648 int64_t j = i - stride;
649
650 // Group generate: g_i OR (p_i AND g_j)
651 Value andPG = comb::AndOp::create(builder, loc, pPrefix[i], gPrefix[j]);
652 gPrefixNew[i] = comb::OrOp::create(builder, loc, gPrefix[i], andPG);
653
654 // Group propagate: p_i AND p_j
655 pPrefixNew[i] = comb::AndOp::create(builder, loc, pPrefix[i], pPrefix[j]);
656 }
657 pPrefix = pPrefixNew;
658 gPrefix = gPrefixNew;
659 }
660
661 LLVM_DEBUG({
662 int64_t stage = 0;
663 for (stride = 1; stride < width; stride *= 2) {
664 llvm::dbgs() << "--------------------------------------- Brent-Kung FW "
665 << stage << " : Stride " << stride << "\n";
666 for (int64_t i = stride * 2 - 1; i < width; i += stride * 2) {
667 int64_t j = i - stride;
668
669 // Group generate: g_i OR (p_i AND g_j)
670 llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
671 << " OR (P" << i << stage << " AND G" << j << stage
672 << ")\n";
673
674 // Group propagate: p_i AND p_j
675 llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
676 << " AND P" << j << stage << "\n";
677 }
678 ++stage;
679 }
680
681 for (; stride > 0; stride /= 2) {
682 if (stride * 3 - 1 < width)
683 llvm::dbgs() << "--------------------------------------- Brent-Kung BW "
684 << stage << " : Stride " << stride << "\n";
685
686 for (int64_t i = stride * 3 - 1; i < width; i += stride * 2) {
687 int64_t j = i - stride;
688
689 // Group generate: g_i OR (p_i AND g_j)
690 llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
691 << " OR (P" << i << stage << " AND G" << j << stage
692 << ")\n";
693
694 // Group propagate: p_i AND p_j
695 llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
696 << " AND P" << j << stage << "\n";
697 }
698 --stage;
699 }
700 });
701}
702
703// TODO: Generalize to other parallel prefix trees.
704class LazyKoggeStonePrefixTree {
705public:
706 LazyKoggeStonePrefixTree(OpBuilder &builder, Location loc, int64_t width,
707 ArrayRef<Value> pPrefix, ArrayRef<Value> gPrefix)
708 : builder(builder), loc(loc), width(width) {
709 assert(width > 0 && "width must be positive");
710 for (int64_t i = 0; i < width; ++i)
711 prefixCache[{0, i}] = {pPrefix[i], gPrefix[i]};
712 }
713
714 // Get the final group and propagate values for bit i.
715 std::pair<Value, Value> getFinal(int64_t i) {
716 assert(i >= 0 && i < width && "i out of bounds");
717 // Final level is ceil(log2(width)) in Kogge-Stone.
718 return getGroupAndPropagate(llvm::Log2_64_Ceil(width), i);
719 }
720
721private:
722 // Recursively get the group and propagate values for bit i at level `level`.
723 // Level 0 is the initial level with the input propagate and generate values.
724 // Level n computes the group and propagate values for a stride of 2^(n-1).
725 // Uses memoization to cache intermediate results.
726 std::pair<Value, Value> getGroupAndPropagate(int64_t level, int64_t i);
727 OpBuilder &builder;
728 Location loc;
729 int64_t width;
730 DenseMap<std::pair<int64_t, int64_t>, std::pair<Value, Value>> prefixCache;
731};
732
733std::pair<Value, Value>
734LazyKoggeStonePrefixTree::getGroupAndPropagate(int64_t level, int64_t i) {
735 assert(i < width && "i out of bounds");
736 auto key = std::make_pair(level, i);
737 auto it = prefixCache.find(key);
738 if (it != prefixCache.end())
739 return it->second;
740
741 assert(level > 0 && "If the level is 0, we should have hit the cache");
742
743 int64_t previousStride = 1ULL << (level - 1);
744 if (i < previousStride) {
745 // No dependency, just copy from the previous level.
746 auto [propagateI, generateI] = getGroupAndPropagate(level - 1, i);
747 prefixCache[key] = {propagateI, generateI};
748 return prefixCache[key];
749 }
750 // Get the dependency index.
751 int64_t j = i - previousStride;
752 auto [propagateI, generateI] = getGroupAndPropagate(level - 1, i);
753 auto [propagateJ, generateJ] = getGroupAndPropagate(level - 1, j);
754 // Group generate: g_i OR (p_i AND g_j)
755 Value andPG = comb::AndOp::create(builder, loc, propagateI, generateJ);
756 Value newGenerate = comb::OrOp::create(builder, loc, generateI, andPG);
757 // Group propagate: p_i AND p_j
758 Value newPropagate =
759 comb::AndOp::create(builder, loc, propagateI, propagateJ);
760 prefixCache[key] = {newPropagate, newGenerate};
761 return prefixCache[key];
762}
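// The recurrence implemented above, with stride = 2^(level-1):
//   P[level][i] = P[level-1][i] & P[level-1][i - stride]
//   G[level][i] = G[level-1][i] | (P[level-1][i] & G[level-1][i - stride])
// and (P, G)[level][i] = (P, G)[level-1][i] when i < stride. The cache ensures
// that only entries reachable from the requested final bit are materialized.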
763
764template <bool lowerToMIG>
765struct CombAddOpConversion : OpConversionPattern<AddOp> {
766 using OpConversionPattern<AddOp>::OpConversionPattern;
767
768 LogicalResult
769 matchAndRewrite(AddOp op, OpAdaptor adaptor,
770 ConversionPatternRewriter &rewriter) const override {
771 auto inputs = adaptor.getInputs();
772 // Lower only when there are two inputs.
773 // Variadic operands must be lowered in a different pattern.
774 if (inputs.size() != 2)
775 return failure();
776
777 auto width = op.getType().getIntOrFloatBitWidth();
778 // Skip a zero width value.
779 if (width == 0) {
780 replaceOpWithNewOpAndCopyNamehint<hw::ConstantOp>(rewriter, op,
781 op.getType(), 0);
782 return success();
783 }
784
785 // Check if the architecture is specified by an attribute.
786 auto arch = determineAdderArch(op, width);
787 if (arch == AdderArchitecture::RippleCarry)
788 return lowerRippleCarryAdder(op, inputs, rewriter);
789 return lowerParallelPrefixAdder(op, inputs, rewriter);
790 }
791
792 // Implement a basic ripple-carry adder for small bitwidths.
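// The per-bit recurrence built below is the textbook full adder:
//   sum[i]   = a[i] ^ b[i] ^ carry[i-1]
//   carry[i] = maj(a[i], b[i], carry[i-1])
//            = (carry[i-1] & (a[i] ^ b[i])) | (a[i] & b[i])
// with carry[-1] = 0, so carry[0] degenerates to a[0] & b[0].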
793 LogicalResult
794 lowerRippleCarryAdder(comb::AddOp op, ValueRange inputs,
795 ConversionPatternRewriter &rewriter) const {
796 auto width = op.getType().getIntOrFloatBitWidth();
797 // Implement a naive Ripple-carry full adder.
798 Value carry;
799
800 auto aBits = extractBits(rewriter, inputs[0]);
801 auto bBits = extractBits(rewriter, inputs[1]);
802 SmallVector<Value> results;
803 results.resize(width);
804 for (int64_t i = 0; i < width; ++i) {
805 SmallVector<Value> xorOperands = {aBits[i], bBits[i]};
806 if (carry)
807 xorOperands.push_back(carry);
808
809 // sum[i] = xor(carry[i-1], a[i], b[i])
810 // NOTE: The result is stored in reverse order.
811 results[width - i - 1] =
812 comb::XorOp::create(rewriter, op.getLoc(), xorOperands, true);
813
814 // If this is the last bit, we are done.
815 if (i == width - 1)
816 break;
817
818 // carry[i] = (carry[i-1] & (a[i] ^ b[i])) | (a[i] & b[i])
819 if (!carry) {
820 // This is the first bit, so the carry-out is simply a[i] & b[i].
821 carry = comb::AndOp::create(rewriter, op.getLoc(),
822 ValueRange{aBits[i], bBits[i]}, true);
823 continue;
824 }
825
826 carry = createMajorityFunction(rewriter, op.getLoc(), aBits[i], bBits[i],
827 carry, lowerToMIG);
828 }
829 LLVM_DEBUG(llvm::dbgs() << "Lower comb.add to Ripple-Carry Adder of width "
830 << width << "\n");
831
832 replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(rewriter, op, results);
833 return success();
834 }
835
836 // Implement a parallel prefix adder using a Sklansky, Kogge-Stone, or
837 // Brent-Kung tree. This introduces unused signals for the carry bits, but
838 // they are removed by later AIG optimizations.
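// Worked example (illustrative): adding a = 0b0110 and b = 0b0011 (width 4):
//   p = a ^ b = 0b0101, g = a & b = 0b0010,
//   prefix generates G[0..0] = 0, G[0..1] = 1, G[0..2] = 1, G[0..3] = 0,
//   sum[0] = p[0] = 1 and sum[i] = p[i] ^ G[0..i-1] for i > 0, giving 0b1001 = 9.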
839 LogicalResult
840 lowerParallelPrefixAdder(comb::AddOp op, ValueRange inputs,
841 ConversionPatternRewriter &rewriter) const {
842 auto width = op.getType().getIntOrFloatBitWidth();
843
844 auto aBits = extractBits(rewriter, inputs[0]);
845 auto bBits = extractBits(rewriter, inputs[1]);
846
847 // Construct propagate (p) and generate (g) signals
848 SmallVector<Value> p, g;
849 p.reserve(width);
850 g.reserve(width);
851
852 for (auto [aBit, bBit] : llvm::zip(aBits, bBits)) {
853 // p_i = a_i XOR b_i
854 p.push_back(comb::XorOp::create(rewriter, op.getLoc(), aBit, bBit));
855 // g_i = a_i AND b_i
856 g.push_back(comb::AndOp::create(rewriter, op.getLoc(), aBit, bBit));
857 }
858
859 LLVM_DEBUG({
860 llvm::dbgs() << "Lower comb.add to Parallel-Prefix of width " << width
861 << "\n--------------------------------------- Init\n";
862
863 for (int64_t i = 0; i < width; ++i) {
864 // p_i = a_i XOR b_i
865 llvm::dbgs() << "P0" << i << " = A" << i << " XOR B" << i << "\n";
866 // g_i = a_i AND b_i
867 llvm::dbgs() << "G0" << i << " = A" << i << " AND B" << i << "\n";
868 }
869 });
870
871 // Create copies of p and g for the prefix computation
872 SmallVector<Value> pPrefix = p;
873 SmallVector<Value> gPrefix = g;
874
875 // Check if the architecture is specified by an attribute.
876 auto arch = determineAdderArch(op, width);
877
878 switch (arch) {
879 case AdderArchitecture::RippleCarry:
880 llvm_unreachable("Ripple-Carry should be handled separately");
881 break;
882 case AdderArchitecture::Sklanskey:
883 lowerSklanskeyPrefixTree(rewriter, op.getLoc(), pPrefix, gPrefix);
884 break;
885 case AdderArchitecture::KoggeStone:
886 lowerKoggeStonePrefixTree(rewriter, op.getLoc(), pPrefix, gPrefix);
887 break;
888 case AdderArchitecture::BrentKung:
889 lowerBrentKungPrefixTree(rewriter, op.getLoc(), pPrefix, gPrefix);
890 break;
891 }
892
893 // Generate result sum bits
894 // NOTE: The result is stored in reverse order.
895 SmallVector<Value> results;
896 results.resize(width);
897 // Sum bit 0 is just p[0] since carry_in = 0
898 results[width - 1] = p[0];
899
900 // For remaining bits, sum_i = p_i XOR g_(i-1)
901 // The carry into position i is the group generate from position i-1
902 for (int64_t i = 1; i < width; ++i)
903 results[width - 1 - i] =
904 comb::XorOp::create(rewriter, op.getLoc(), p[i], gPrefix[i - 1]);
905
906 replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(rewriter, op, results);
907
908 LLVM_DEBUG({
909 llvm::dbgs() << "--------------------------------------- Completion\n"
910 << "RES0 = P0\n";
911 for (int64_t i = 1; i < width; ++i)
912 llvm::dbgs() << "RES" << i << " = P" << i << " XOR G" << i - 1 << "\n";
913 });
914
915 return success();
916 }
917};
918
919struct CombMulOpConversion : OpConversionPattern<MulOp> {
920 using OpConversionPattern<MulOp>::OpConversionPattern;
921 using OpAdaptor = typename OpConversionPattern<MulOp>::OpAdaptor;
922 LogicalResult
923 matchAndRewrite(MulOp op, OpAdaptor adaptor,
924 ConversionPatternRewriter &rewriter) const override {
925 if (adaptor.getInputs().size() != 2)
926 return failure();
927
928 Location loc = op.getLoc();
929 Value a = adaptor.getInputs()[0];
930 Value b = adaptor.getInputs()[1];
931 unsigned width = op.getType().getIntOrFloatBitWidth();
932
933 // Skip a zero width value.
934 if (width == 0) {
935 rewriter.replaceOpWithNewOp<hw::ConstantOp>(op, op.getType(), 0);
936 return success();
937 }
938
939 // Extract individual bits from operands
940 SmallVector<Value> aBits = extractBits(rewriter, a);
941 SmallVector<Value> bBits = extractBits(rewriter, b);
942
943 auto falseValue = hw::ConstantOp::create(rewriter, loc, APInt(1, 0));
944
945 // Generate partial products
946 SmallVector<SmallVector<Value>> partialProducts;
947 partialProducts.reserve(width);
948 for (unsigned i = 0; i < width; ++i) {
949 SmallVector<Value> row(i, falseValue);
950 row.reserve(width);
951 // Generate partial product bits
952 for (unsigned j = 0; i + j < width; ++j)
953 row.push_back(
954 rewriter.createOrFold<comb::AndOp>(loc, aBits[j], bBits[i]));
955
956 partialProducts.push_back(row);
957 }
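// Illustrative layout for width 3: row 0 = [a0b0, a1b0, a2b0],
// row 1 = [0, a0b1, a1b1], row 2 = [0, 0, a0b2]; row i is (a & replicate(b[i])) << i
// truncated to the result width, so the rows sum to a * b mod 2^width.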
958
959 // If the width is 1, we are done.
960 if (width == 1) {
961 rewriter.replaceOp(op, partialProducts[0][0]);
962 return success();
963 }
964
965 // Wallace tree reduction - reduce to two addends.
966 datapath::CompressorTree comp(width, partialProducts, loc);
967 auto addends = comp.compressToHeight(rewriter, 2);
968
969 // Sum the two addends using a carry-propagate adder
970 auto newAdd = comb::AddOp::create(rewriter, loc, addends, true);
971 replaceOpAndCopyNamehint(rewriter, op, newAdd);
972 return success();
973 }
974};
975
976template <typename OpTy>
977struct DivModOpConversionBase : OpConversionPattern<OpTy> {
978 DivModOpConversionBase(MLIRContext *context, int64_t maxEmulationUnknownBits)
979 : OpConversionPattern<OpTy>(context),
980 maxEmulationUnknownBits(maxEmulationUnknownBits) {
981 assert(maxEmulationUnknownBits < 32 &&
982 "maxEmulationUnknownBits must be less than 32");
983 }
984 const int64_t maxEmulationUnknownBits;
985};
986
987struct CombDivUOpConversion : DivModOpConversionBase<DivUOp> {
988 using DivModOpConversionBase<DivUOp>::DivModOpConversionBase;
989 LogicalResult
990 matchAndRewrite(DivUOp op, OpAdaptor adaptor,
991 ConversionPatternRewriter &rewriter) const override {
992 // Check if the divisor is a power of two.
993 if (llvm::succeeded(comb::convertDivUByPowerOfTwo(op, rewriter)))
994 return success();
995
996 // When the divisor is not a power of two and the number of unknown bits is
997 // small, create a mux tree that emulates all possible cases.
998 return emulateBinaryOpForUnknownBits(
999 rewriter, maxEmulationUnknownBits, op,
1000 [](const APInt &lhs, const APInt &rhs) {
1001 // Division by zero is undefined, just return zero.
1002 if (rhs.isZero())
1003 return APInt::getZero(rhs.getBitWidth());
1004 return lhs.udiv(rhs);
1005 });
1006 }
1007};
1008
1009struct CombModUOpConversion : DivModOpConversionBase<ModUOp> {
1010 using DivModOpConversionBase<ModUOp>::DivModOpConversionBase;
1011 LogicalResult
1012 matchAndRewrite(ModUOp op, OpAdaptor adaptor,
1013 ConversionPatternRewriter &rewriter) const override {
1014 // Check if the divisor is a power of two.
1015 if (llvm::succeeded(comb::convertModUByPowerOfTwo(op, rewriter)))
1016 return success();
1017
1018 // When the divisor is not a power of two and the number of unknown bits is
1019 // small, create a mux tree that emulates all possible cases.
1020 return emulateBinaryOpForUnknownBits(
1021 rewriter, maxEmulationUnknownBits, op,
1022 [](const APInt &lhs, const APInt &rhs) {
1023 // Division by zero is undefined, just return zero.
1024 if (rhs.isZero())
1025 return APInt::getZero(rhs.getBitWidth());
1026 return lhs.urem(rhs);
1027 });
1028 }
1029};
1030
1031struct CombDivSOpConversion : DivModOpConversionBase<DivSOp> {
1032 using DivModOpConversionBase<DivSOp>::DivModOpConversionBase;
1033
1034 LogicalResult
1035 matchAndRewrite(DivSOp op, OpAdaptor adaptor,
1036 ConversionPatternRewriter &rewriter) const override {
1037 // Currently only lower with emulation.
1038 // TODO: Implement a signed division lowering at least for power of two.
1039 return emulateBinaryOpForUnknownBits(
1040 rewriter, maxEmulationUnknownBits, op,
1041 [](const APInt &lhs, const APInt &rhs) {
1042 // Division by zero is undefined, just return zero.
1043 if (rhs.isZero())
1044 return APInt::getZero(rhs.getBitWidth());
1045 return lhs.sdiv(rhs);
1046 });
1047 }
1048};
1049
1050struct CombModSOpConversion : DivModOpConversionBase<ModSOp> {
1051 using DivModOpConversionBase<ModSOp>::DivModOpConversionBase;
1052 LogicalResult
1053 matchAndRewrite(ModSOp op, OpAdaptor adaptor,
1054 ConversionPatternRewriter &rewriter) const override {
1055 // Currently only lower with emulation.
1056 // TODO: Implement a signed modulus lowering at least for power of two.
1057 return emulateBinaryOpForUnknownBits(
1058 rewriter, maxEmulationUnknownBits, op,
1059 [](const APInt &lhs, const APInt &rhs) {
1060 // Division by zero is undefined, just return zero.
1061 if (rhs.isZero())
1062 return APInt::getZero(rhs.getBitWidth());
1063 return lhs.srem(rhs);
1064 });
1065 }
1066};
1067
1068struct CombICmpOpConversion : OpConversionPattern<ICmpOp> {
1069 using OpConversionPattern<ICmpOp>::OpConversionPattern;
1070
1071 // Simple comparator for small bit widths
1072 static Value constructRippleCarry(Location loc, Value a, Value b,
1073 bool includeEq,
1074 ConversionPatternRewriter &rewriter) {
1075 // Construct the following unsigned comparison expressions.
1076 // a <= b ==> (~a[n] & b[n]) | (a[n] == b[n] & a[n-1:0] <= b[n-1:0])
1077 // a < b ==> (~a[n] & b[n]) | (a[n] == b[n] & a[n-1:0] < b[n-1:0])
1078 auto aBits = extractBits(rewriter, a);
1079 auto bBits = extractBits(rewriter, b);
1080 Value acc = hw::ConstantOp::create(rewriter, loc, APInt(1, includeEq));
1081
1082 for (auto [aBit, bBit] : llvm::zip(aBits, bBits)) {
1083 auto aBitXorBBit =
1084 rewriter.createOrFold<comb::XorOp>(loc, aBit, bBit, true);
1085 auto aEqualB = rewriter.createOrFold<synth::aig::AndInverterOp>(
1086 loc, aBitXorBBit, true);
1087 auto pred = rewriter.createOrFold<synth::aig::AndInverterOp>(
1088 loc, aBit, bBit, true, false);
1089
1090 auto aBitAndBBit = rewriter.createOrFold<comb::AndOp>(
1091 loc, ValueRange{aEqualB, acc}, true);
1092 acc = rewriter.createOrFold<comb::OrOp>(loc, pred, aBitAndBBit, true);
1093 }
1094 return acc;
1095 }
1096
1097 // Compute prefix comparison using parallel prefix algorithm
1098 // Note: This generates all intermediate prefix values even though we only
1099 // need the final result. Optimizing this to skip intermediate computations
1100 // is non-trivial because each iteration depends on results from previous
1101 // iterations. We rely on DCE passes to remove unused operations.
1102 // TODO: Lazily compute only the required prefix values. Kogge-Stone is
1103 // already implemented in a lazy manner below, but other architectures can
1104 // also be optimized.
1105 static Value computePrefixComparison(ConversionPatternRewriter &rewriter,
1106 Location loc, SmallVector<Value> pPrefix,
1107 SmallVector<Value> gPrefix,
1108 bool includeEq, AdderArchitecture arch) {
1109 auto width = pPrefix.size();
1110 Value finalGroup, finalPropagate;
1111 // Apply the appropriate prefix tree algorithm
1112 switch (arch) {
1113 case AdderArchitecture::RippleCarry:
1114 llvm_unreachable("Ripple-Carry should be handled separately");
1115 break;
1116 case AdderArchitecture::Sklanskey: {
1117 lowerSklanskeyPrefixTree(rewriter, loc, pPrefix, gPrefix);
1118 finalGroup = gPrefix[width - 1];
1119 finalPropagate = pPrefix[width - 1];
1120 break;
1121 }
1122 case AdderArchitecture::KoggeStone:
1123 // Use lazy Kogge-Stone implementation to avoid computing all
1124 // intermediate prefix values.
1125 std::tie(finalPropagate, finalGroup) =
1126 LazyKoggeStonePrefixTree(rewriter, loc, width, pPrefix, gPrefix)
1127 .getFinal(width - 1);
1128 break;
1129 case AdderArchitecture::BrentKung: {
1130 lowerBrentKungPrefixTree(rewriter, loc, pPrefix, gPrefix);
1131 finalGroup = gPrefix[width - 1];
1132 finalPropagate = pPrefix[width - 1];
1133 break;
1134 }
1135 }
1136
1137 // Final result: `finalGroup` gives us "a < b"
1138 if (includeEq) {
1139 // a <= b iff (a < b) OR (a == b)
1140 // a == b iff `finalPropagate` (all bits are equal)
1141 return comb::OrOp::create(rewriter, loc, finalGroup, finalPropagate);
1142 }
1143 // a < b iff `finalGroup`
1144 return finalGroup;
1145 }
1146
1147 // Construct an unsigned comparator using either ripple-carry or
1148 // parallel-prefix architecture. The comparison uses a parallel prefix tree
1149 // internally, so the `AdderArchitecture` enum is reused to select the architecture.
1150 static Value constructUnsignedCompare(Operation *op, Location loc, Value a,
1151 Value b, bool isLess, bool includeEq,
1152 ConversionPatternRewriter &rewriter) {
1153 // Canonicalize to the less-than form (a < b or a <= b) by swapping operands.
1154 if (!isLess)
1155 std::swap(a, b);
1156 auto width = a.getType().getIntOrFloatBitWidth();
1157
1158 // Check if the architecture is specified by an attribute.
1159 auto arch = determineAdderArch(op, width);
1160 if (arch == AdderArchitecture::RippleCarry)
1161 return constructRippleCarry(loc, a, b, includeEq, rewriter);
1162
1163 // For larger widths, use parallel prefix tree
1164 auto aBits = extractBits(rewriter, a);
1165 auto bBits = extractBits(rewriter, b);
1166
1167 // For comparison, we compute:
1168 // - Equal bits: eq_i = ~(a_i ^ b_i)
1169 // - Greater bits: gt_i = ~a_i & b_i (a_i < b_i)
1170 // - Propagate: p_i = eq_i (equality propagates)
1171 // - Generate: g_i = gt_i (greater-than generates)
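// Worked example (illustrative), already in the less-than form: a = 0b0101,
// b = 0b0110 gives eq = 0b1100 and gt = 0b0010. The prefix tree then yields
// finalGroup = gt3 | eq3 & (gt2 | eq2 & (gt1 | eq1 & gt0)) = 1 (a < b) and
// finalPropagate = eq3 & eq2 & eq1 & eq0 = 0 (a != b).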
1172 SmallVector<Value> eq, gt;
1173 eq.reserve(width);
1174 gt.reserve(width);
1175
1176 auto one =
1177 hw::ConstantOp::create(rewriter, loc, rewriter.getIntegerType(1), 1);
1178
1179 for (auto [aBit, bBit] : llvm::zip(aBits, bBits)) {
1180 // eq_i = ~(a_i ^ b_i) = a_i == b_i
1181 auto xorBit = comb::XorOp::create(rewriter, loc, aBit, bBit);
1182 eq.push_back(comb::XorOp::create(rewriter, loc, xorBit, one));
1183
1184 // gt_i = ~a_i & b_i = a_i < b_i
1185 auto notA = comb::XorOp::create(rewriter, loc, aBit, one);
1186 gt.push_back(comb::AndOp::create(rewriter, loc, notA, bBit));
1187 }
1188
1189 return computePrefixComparison(rewriter, loc, std::move(eq), std::move(gt),
1190 includeEq, arch);
1191 }
1192
1193 LogicalResult
1194 matchAndRewrite(ICmpOp op, OpAdaptor adaptor,
1195 ConversionPatternRewriter &rewriter) const override {
1196 auto lhs = adaptor.getLhs();
1197 auto rhs = adaptor.getRhs();
1198
1199 switch (op.getPredicate()) {
1200 default:
1201 return failure();
1202
1203 case ICmpPredicate::eq:
1204 case ICmpPredicate::ceq: {
1205 // a == b ==> ~(a[n] ^ b[n]) & ~(a[n-1] ^ b[n-1]) & ...
1206 auto xorOp = rewriter.createOrFold<comb::XorOp>(op.getLoc(), lhs, rhs);
1207 auto xorBits = extractBits(rewriter, xorOp);
1208 SmallVector<bool> allInverts(xorBits.size(), true);
1209 replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
1210 rewriter, op, xorBits, allInverts);
1211 return success();
1212 }
1213
1214 case ICmpPredicate::ne:
1215 case ICmpPredicate::cne: {
1216 // a != b ==> (a[n] ^ b[n]) | (a[n-1] ^ b[n-1]) | ...
1217 auto xorOp = rewriter.createOrFold<comb::XorOp>(op.getLoc(), lhs, rhs);
1218 replaceOpWithNewOpAndCopyNamehint<comb::OrOp>(
1219 rewriter, op, extractBits(rewriter, xorOp), true);
1220 return success();
1221 }
1222
1223 case ICmpPredicate::uge:
1224 case ICmpPredicate::ugt:
1225 case ICmpPredicate::ule:
1226 case ICmpPredicate::ult: {
1227 bool isLess = op.getPredicate() == ICmpPredicate::ult ||
1228 op.getPredicate() == ICmpPredicate::ule;
1229 bool includeEq = op.getPredicate() == ICmpPredicate::uge ||
1230 op.getPredicate() == ICmpPredicate::ule;
1231 replaceOpAndCopyNamehint(rewriter, op,
1232 constructUnsignedCompare(op, op.getLoc(), lhs,
1233 rhs, isLess, includeEq,
1234 rewriter));
1235 return success();
1236 }
1237 case ICmpPredicate::slt:
1238 case ICmpPredicate::sle:
1239 case ICmpPredicate::sgt:
1240 case ICmpPredicate::sge: {
1241 if (lhs.getType().getIntOrFloatBitWidth() == 0)
1242 return rewriter.notifyMatchFailure(
1243 op.getLoc(), "i0 signed comparison is unsupported");
1244 bool isLess = op.getPredicate() == ICmpPredicate::slt ||
1245 op.getPredicate() == ICmpPredicate::sle;
1246 bool includeEq = op.getPredicate() == ICmpPredicate::sge ||
1247 op.getPredicate() == ICmpPredicate::sle;
1248
1249 // Get a sign bit
1250 auto signA = extractMSB(rewriter, lhs);
1251 auto signB = extractMSB(rewriter, rhs);
1252 auto aRest = extractOtherThanMSB(rewriter, lhs);
1253 auto bRest = extractOtherThanMSB(rewriter, rhs);
1254
1255 // Compare magnitudes (all bits except sign)
1256 auto sameSignResult = constructUnsignedCompare(
1257 op, op.getLoc(), aRest, bRest, isLess, includeEq, rewriter);
1258
1259 // XOR of signs: true if signs are different
1260 auto signsDiffer =
1261 comb::XorOp::create(rewriter, op.getLoc(), signA, signB);
1262
1263 // Result when signs are different
1264 Value diffSignResult = isLess ? signA : signB;
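// When the signs differ the negative operand (sign bit set) is the smaller one
// in two's complement, so a < b reduces to signA and a > b reduces to signB.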
1265
1266 // Final result: choose based on whether signs differ
1267 replaceOpWithNewOpAndCopyNamehint<comb::MuxOp>(
1268 rewriter, op, signsDiffer, diffSignResult, sameSignResult);
1269 return success();
1270 }
1271 }
1272 }
1273};
1274
1275struct CombParityOpConversion : OpConversionPattern<ParityOp> {
1276 using OpConversionPattern<ParityOp>::OpConversionPattern;
1277
1278 LogicalResult
1279 matchAndRewrite(ParityOp op, OpAdaptor adaptor,
1280 ConversionPatternRewriter &rewriter) const override {
1281 // Parity is the XOR of all bits.
1282 replaceOpWithNewOpAndCopyNamehint<comb::XorOp>(
1283 rewriter, op, extractBits(rewriter, adaptor.getInput()), true);
1284 return success();
1285 }
1286};
1287
1288struct CombShlOpConversion : OpConversionPattern<comb::ShlOp> {
1289 using OpConversionPattern<comb::ShlOp>::OpConversionPattern;
1290
1291 LogicalResult
1292 matchAndRewrite(comb::ShlOp op, OpAdaptor adaptor,
1293 ConversionPatternRewriter &rewriter) const override {
1294 auto width = op.getType().getIntOrFloatBitWidth();
1295 auto lhs = adaptor.getLhs();
1296 auto result = createShiftLogic</*isLeftShift=*/true>(
1297 rewriter, op.getLoc(), adaptor.getRhs(), width,
1298 /*getPadding=*/
1299 [&](int64_t index) {
1300 // Don't create zero width value.
1301 if (index == 0)
1302 return Value();
1303 // Padding is 0 for left shift.
1304 return rewriter.createOrFold<hw::ConstantOp>(
1305 op.getLoc(), rewriter.getIntegerType(index), 0);
1306 },
1307 /*getExtract=*/
1308 [&](int64_t index) {
1309 assert(index < width && "index out of bounds");
1310 // Extract the bits from the LSB.
1311 return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, 0,
1312 width - index);
1313 });
1314
1315 replaceOpAndCopyNamehint(rewriter, op, result);
1316 return success();
1317 }
1318};
1319
1320struct CombShrUOpConversion : OpConversionPattern<comb::ShrUOp> {
1321 using OpConversionPattern<comb::ShrUOp>::OpConversionPattern;
1322
1323 LogicalResult
1324 matchAndRewrite(comb::ShrUOp op, OpAdaptor adaptor,
1325 ConversionPatternRewriter &rewriter) const override {
1326 auto width = op.getType().getIntOrFloatBitWidth();
1327 auto lhs = adaptor.getLhs();
1328 auto result = createShiftLogic</*isLeftShift=*/false>(
1329 rewriter, op.getLoc(), adaptor.getRhs(), width,
1330 /*getPadding=*/
1331 [&](int64_t index) {
1332 // Don't create zero width value.
1333 if (index == 0)
1334 return Value();
1335 // Padding is 0 for right shift.
1336 return rewriter.createOrFold<hw::ConstantOp>(
1337 op.getLoc(), rewriter.getIntegerType(index), 0);
1338 },
1339 /*getExtract=*/
1340 [&](int64_t index) {
1341 assert(index < width && "index out of bounds");
1342 // Extract the bits from the MSB.
1343 return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, index,
1344 width - index);
1345 });
1346
1347 replaceOpAndCopyNamehint(rewriter, op, result);
1348 return success();
1349 }
1350};
1351
1352struct CombShrSOpConversion : OpConversionPattern<comb::ShrSOp> {
1353 using OpConversionPattern<comb::ShrSOp>::OpConversionPattern;
1354
1355 LogicalResult
1356 matchAndRewrite(comb::ShrSOp op, OpAdaptor adaptor,
1357 ConversionPatternRewriter &rewriter) const override {
1358 auto width = op.getType().getIntOrFloatBitWidth();
1359 if (width == 0)
1360 return rewriter.notifyMatchFailure(op.getLoc(),
1361 "i0 signed shift is unsupported");
1362 auto lhs = adaptor.getLhs();
1363 // Get the sign bit.
1364 auto sign =
1365 rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, width - 1, 1);
1366
1367 // NOTE: The max shift amount is width - 1 because the sign bit is
1368 // already shifted out.
1369 auto result = createShiftLogic</*isLeftShift=*/false>(
1370 rewriter, op.getLoc(), adaptor.getRhs(), width - 1,
1371 /*getPadding=*/
1372 [&](int64_t index) {
1373 return rewriter.createOrFold<comb::ReplicateOp>(op.getLoc(), sign,
1374 index + 1);
1375 },
1376 /*getExtract=*/
1377 [&](int64_t index) {
1378 return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, index,
1379 width - index - 1);
1380 });
1381
1382 replaceOpAndCopyNamehint(rewriter, op, result);
1383 return success();
1384 }
1385};
1386
1387} // namespace
1388
1389//===----------------------------------------------------------------------===//
1390// Convert Comb to Synth pass
1391//===----------------------------------------------------------------------===//
1392
1393namespace {
1394struct ConvertCombToSynthPass
1395 : public impl::ConvertCombToSynthBase<ConvertCombToSynthPass> {
1396 void runOnOperation() override;
1397 using ConvertCombToSynthBase<ConvertCombToSynthPass>::ConvertCombToSynthBase;
1398};
1399} // namespace
1400
1401static void
1402populateCombToAIGConversionPatterns(RewritePatternSet &patterns,
1403 uint32_t maxEmulationUnknownBits,
1404 bool lowerToMIG) {
1405 patterns.add<
1406 // Bitwise Logical Ops
1407 CombAndOpConversion, CombXorOpConversion, CombMuxOpConversion,
1408 CombParityOpConversion,
1409 // Arithmetic Ops
1410 CombMulOpConversion, CombICmpOpConversion,
1411 // Shift Ops
1412 CombShlOpConversion, CombShrUOpConversion, CombShrSOpConversion,
1413 // Variadic ops that must be lowered to binary operations
1414 CombLowerVariadicOp<XorOp>, CombLowerVariadicOp<AddOp>,
1415 CombLowerVariadicOp<MulOp>>(patterns.getContext());
1416
1417 patterns.add(comb::convertSubToAdd);
1418
1419 if (lowerToMIG) {
1420 patterns.add<CombOrToMIGConversion, CombLowerVariadicOp<OrOp>,
1421 AndInverterToMIGConversion,
1423 CombAddOpConversion</*useMIG=*/true>>(patterns.getContext());
1424 } else {
1425 patterns.add<CombOrToAIGConversion, CombAddOpConversion</*useMIG=*/false>>(
1426 patterns.getContext());
1427 }
1428
1429 // Add div/mod patterns with a threshold given by the pass option.
1430 patterns.add<CombDivUOpConversion, CombModUOpConversion, CombDivSOpConversion,
1431 CombModSOpConversion>(patterns.getContext(),
1432 maxEmulationUnknownBits);
1433}
1434
1435void ConvertCombToSynthPass::runOnOperation() {
1436 ConversionTarget target(getContext());
1437
1438 // Comb is source dialect.
1439 target.addIllegalDialect<comb::CombDialect>();
1440 // Keep data movement operations like Extract, Concat and Replicate.
1441 target.addLegalOp<comb::ExtractOp, comb::ConcatOp, comb::ReplicateOp,
1442 hw::BitcastOp, hw::ConstantOp>();
1443
1444 // Treat array operations as illegal. Strictly speaking, array operations
1445 // other than an array get with a non-constant index are legal in AIG, but
1446 // array types prevent a number of optimizations, so lower them to integer
1447 // operations. The HWAggregateToComb pass must be run before this pass.
1448 target.addIllegalOp<hw::ArrayGetOp, hw::ArrayCreateOp, hw::ArrayConcatOp,
1449 hw::AggregateConstantOp>();
1450
1451 target.addLegalDialect<synth::SynthDialect>();
1452
1453 if (targetIR == CombToSynthTargetIR::AIG) {
1454 // AIG is target dialect.
1455 target.addIllegalOp<synth::mig::MajorityInverterOp>();
1456 } else if (targetIR == CombToSynthTargetIR::MIG) {
1457 target.addIllegalOp<synth::aig::AndInverterOp>();
1458 }
1459
1460 // If additional legal ops are specified, add them to the target.
1461 if (!additionalLegalOps.empty())
1462 for (const auto &opName : additionalLegalOps)
1463 target.addLegalOp(OperationName(opName, &getContext()));
1464
1465 RewritePatternSet patterns(&getContext());
1466 populateCombToAIGConversionPatterns(patterns, maxEmulationUnknownBits,
1467 targetIR == CombToSynthTargetIR::MIG);
1468
1469 if (failed(mlir::applyPartialConversion(getOperation(), target,
1470 std::move(patterns))))
1471 return signalPassFailure();
1472}