CombToSynth.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the main Comb to Synth Conversion Pass Implementation.
10//
11// High-level Comb Operations
12// | |
13// v |
14// +-------------------+ |
15// | and, or, xor, mux | |
16// +---------+---------+ |
17// | |
18// +-------+--------+ |
19// v v v
20// +-----+ +-----+
21// | AIG |-------->| MIG |
22// +-----+ +-----+
23//
24//===----------------------------------------------------------------------===//
25
33#include "mlir/Pass/Pass.h"
34#include "mlir/Transforms/DialectConversion.h"
35#include "llvm/ADT/APInt.h"
36#include "llvm/ADT/PointerUnion.h"
37#include "llvm/Support/Debug.h"
38#include <array>
39
40#define DEBUG_TYPE "comb-to-synth"
41
42namespace circt {
43#define GEN_PASS_DEF_CONVERTCOMBTOSYNTH
44#include "circt/Conversion/Passes.h.inc"
45} // namespace circt
46
47using namespace circt;
48using namespace comb;
49
50//===----------------------------------------------------------------------===//
51// Utility Functions
52//===----------------------------------------------------------------------===//
53
54// A wrapper for comb::extractBits that returns a SmallVector<Value>.
55static SmallVector<Value> extractBits(OpBuilder &builder, Value val) {
56 SmallVector<Value> bits;
57 comb::extractBits(builder, val, bits);
58 return bits;
59}
60
61// Construct a mux tree for shift operations. `isLeftShift` controls the
62// direction of the shift operation and is used to determine the order of the
63// padding and extracted bits. Callbacks `getPadding` and `getExtract` are used
64// to get the padding and extracted bits for each shift amount. `getPadding`
65// may return a null Value for an i0 (zero-width) value; apart from that, these callbacks must
66// return a valid value for each shift amount in the range [0, maxShiftAmount].
67// The value for `maxShiftAmount` is used as the out-of-bounds value.
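// For example (an illustrative sketch, not exact output), comb.shl on i4
// values lowers through this helper roughly as:
//   nodes[0] = x[3:0]             // shift by 0
//   nodes[1] = {x[2:0], 1'b0}     // shift by 1
//   nodes[2] = {x[1:0], 2'b00}    // shift by 2
//   nodes[3] = {x[0],   3'b000}   // shift by 3
//   tree     = mux tree selected by the bits of the shift amount
//   result   = (shamt <u 4) ? tree : 4'b0000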
68template <bool isLeftShift>
69static Value createShiftLogic(ConversionPatternRewriter &rewriter, Location loc,
70 Value shiftAmount, int64_t maxShiftAmount,
71 llvm::function_ref<Value(int64_t)> getPadding,
72 llvm::function_ref<Value(int64_t)> getExtract) {
73 // Extract individual bits from shift amount
74 auto bits = extractBits(rewriter, shiftAmount);
75
76 // Create nodes for each possible shift amount
77 SmallVector<Value> nodes;
78 nodes.reserve(maxShiftAmount);
79 for (int64_t i = 0; i < maxShiftAmount; ++i) {
80 Value extract = getExtract(i);
81 Value padding = getPadding(i);
82
83 if (!padding) {
84 nodes.push_back(extract);
85 continue;
86 }
87
88 // Concatenate extracted bits with padding
89 if (isLeftShift)
90 nodes.push_back(
91 rewriter.createOrFold<comb::ConcatOp>(loc, extract, padding));
92 else
93 nodes.push_back(
94 rewriter.createOrFold<comb::ConcatOp>(loc, padding, extract));
95 }
96
97 // Create out-of-bounds value
98 auto outOfBoundsValue = getPadding(maxShiftAmount);
99 assert(outOfBoundsValue && "outOfBoundsValue must be valid");
100
101 // Construct mux tree for shift operation
102 auto result =
103 comb::constructMuxTree(rewriter, loc, bits, nodes, outOfBoundsValue);
104
105 // Add bounds checking
106 auto inBound = rewriter.createOrFold<comb::ICmpOp>(
107 loc, ICmpPredicate::ult, shiftAmount,
108 hw::ConstantOp::create(rewriter, loc, shiftAmount.getType(),
109 maxShiftAmount));
110
111 return rewriter.createOrFold<comb::MuxOp>(loc, inBound, result,
112 outOfBoundsValue);
113}
114
115// Return a majority-inverter operation if MIG is enabled; otherwise return a
116// majority function implemented with Comb operations. In the latter case
117// `carry` has slightly smaller logic depth than the other inputs.
118static Value createMajorityFunction(OpBuilder &rewriter, Location loc, Value a,
119 Value b, Value carry,
120 bool useMajorityInverterOp) {
121 if (useMajorityInverterOp) {
122 std::array<Value, 3> inputs = {a, b, carry};
123 std::array<bool, 3> inverts = {false, false, false};
124 return synth::mig::MajorityInverterOp::create(rewriter, loc, inputs,
125 inverts);
126 }
127
128 // maj(a, b, c) = (c & (a ^ b)) | (a & b)
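  // Equivalently, maj(a, b, c) = (a & b) | (a & c) | (b & c): when a == b the
  // result is a & b, and when a != b the result is c, which is exactly what
  // the expression above computes.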
129  auto aXorB = comb::XorOp::create(rewriter, loc, ValueRange{a, b}, true);
130  auto andOp =
131      comb::AndOp::create(rewriter, loc, ValueRange{carry, aXorB}, true);
132 auto aAndB = comb::AndOp::create(rewriter, loc, ValueRange{a, b}, true);
133 return comb::OrOp::create(rewriter, loc, ValueRange{andOp, aAndB}, true);
134}
135
136static Value extractMSB(OpBuilder &builder, Value val) {
137 return builder.createOrFold<comb::ExtractOp>(
138 val.getLoc(), val, val.getType().getIntOrFloatBitWidth() - 1, 1);
139}
140
141static Value extractOtherThanMSB(OpBuilder &builder, Value val) {
142 return builder.createOrFold<comb::ExtractOp>(
143 val.getLoc(), val, 0, val.getType().getIntOrFloatBitWidth() - 1);
144}
145
146namespace {
147// A union of Value and IntegerAttr to cleanly handle constant values.
148using ConstantOrValue = llvm::PointerUnion<Value, mlir::IntegerAttr>;
149} // namespace
150
151// Return the number of unknown bits and populate the concatenated values.
152static int64_t getNumUnknownBitsAndPopulateValues(
153    Value value, llvm::SmallVectorImpl<ConstantOrValue> &values) {
154  // Constant or zero-width values are fully known.
155 if (value.getType().isInteger(0))
156 return 0;
157
158 // Recursively count unknown bits for concat.
159 if (auto concat = value.getDefiningOp<comb::ConcatOp>()) {
160 int64_t totalUnknownBits = 0;
161 for (auto concatInput : llvm::reverse(concat.getInputs())) {
162 auto unknownBits =
163 getNumUnknownBitsAndPopulateValues(concatInput, values);
164 if (unknownBits < 0)
165 return unknownBits;
166 totalUnknownBits += unknownBits;
167 }
168 return totalUnknownBits;
169 }
170
171 // Constant value is known.
172 if (auto constant = value.getDefiningOp<hw::ConstantOp>()) {
173 values.push_back(constant.getValueAttr());
174 return 0;
175 }
176
177 // Consider other operations as unknown bits.
178 // TODO: We can handle replicate, extract, etc.
179 values.push_back(value);
180 return hw::getBitWidth(value.getType());
181}
182
183// Return a value that substitutes the unknown bits with the mask.
184static APInt
185substitueMaskToValues(size_t width,
186                      llvm::SmallVectorImpl<ConstantOrValue> &constantOrValues,
187 uint32_t mask) {
188 uint32_t bitPos = 0, unknownPos = 0;
189 APInt result(width, 0);
190 for (auto constantOrValue : constantOrValues) {
191 int64_t elemWidth;
192 if (auto constant = dyn_cast<IntegerAttr>(constantOrValue)) {
193 elemWidth = constant.getValue().getBitWidth();
194 result.insertBits(constant.getValue(), bitPos);
195 } else {
196 elemWidth = hw::getBitWidth(cast<Value>(constantOrValue).getType());
197 assert(elemWidth >= 0 && "unknown bit width");
198 assert(elemWidth + unknownPos < 32 && "unknown bit width too large");
199 // Create a mask for the unknown bits.
200 uint32_t usedBits = (mask >> unknownPos) & ((1 << elemWidth) - 1);
201 result.insertBits(APInt(elemWidth, usedBits), bitPos);
202 unknownPos += elemWidth;
203 }
204 bitPos += elemWidth;
205 }
206
207 return result;
208}
209
210// Emulate a binary operation with unknown bits using a table lookup.
211// This function enumerates all possible combinations of unknown bits and
212// emulates the operation for each combination.
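// For example, if `lhs` is a 2-bit value whose bits are both unknown and `rhs`
// is a constant, the emulation computes lhs op rhs for lhs in {0, 1, 2, 3} and
// selects the matching constant with a mux tree keyed on the two unknown bits.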
213static LogicalResult emulateBinaryOpForUnknownBits(
214 ConversionPatternRewriter &rewriter, int64_t maxEmulationUnknownBits,
215 Operation *op,
216 llvm::function_ref<APInt(const APInt &, const APInt &)> emulate) {
217 SmallVector<ConstantOrValue> lhsValues, rhsValues;
218
219 assert(op->getNumResults() == 1 && op->getNumOperands() == 2 &&
220 "op must be a single result binary operation");
221
222 auto lhs = op->getOperand(0);
223 auto rhs = op->getOperand(1);
224 auto width = op->getResult(0).getType().getIntOrFloatBitWidth();
225 auto loc = op->getLoc();
226 auto numLhsUnknownBits = getNumUnknownBitsAndPopulateValues(lhs, lhsValues);
227 auto numRhsUnknownBits = getNumUnknownBitsAndPopulateValues(rhs, rhsValues);
228
229 // If unknown bit width is detected, abort the lowering.
230 if (numLhsUnknownBits < 0 || numRhsUnknownBits < 0)
231 return failure();
232
233 int64_t totalUnknownBits = numLhsUnknownBits + numRhsUnknownBits;
234 if (totalUnknownBits > maxEmulationUnknownBits)
235 return failure();
236
237 SmallVector<Value> emulatedResults;
238 emulatedResults.reserve(1 << totalUnknownBits);
239
240 // Emulate all possible cases.
241 DenseMap<IntegerAttr, hw::ConstantOp> constantPool;
242 auto getConstant = [&](const APInt &value) -> hw::ConstantOp {
243 auto attr = rewriter.getIntegerAttr(rewriter.getIntegerType(width), value);
244 auto it = constantPool.find(attr);
245 if (it != constantPool.end())
246 return it->second;
247 auto constant = hw::ConstantOp::create(rewriter, loc, value);
248 constantPool[attr] = constant;
249 return constant;
250 };
251
252 for (uint32_t lhsMask = 0, lhsMaskEnd = 1 << numLhsUnknownBits;
253 lhsMask < lhsMaskEnd; ++lhsMask) {
254 APInt lhsValue = substitueMaskToValues(width, lhsValues, lhsMask);
255 for (uint32_t rhsMask = 0, rhsMaskEnd = 1 << numRhsUnknownBits;
256 rhsMask < rhsMaskEnd; ++rhsMask) {
257 APInt rhsValue = substitueMaskToValues(width, rhsValues, rhsMask);
258 // Emulate.
259 emulatedResults.push_back(getConstant(emulate(lhsValue, rhsValue)));
260 }
261 }
262
263 // Create selectors for mux tree.
264 SmallVector<Value> selectors;
265 selectors.reserve(totalUnknownBits);
266 for (auto &concatedValues : {rhsValues, lhsValues})
267 for (auto valueOrConstant : concatedValues) {
268 auto value = dyn_cast<Value>(valueOrConstant);
269 if (!value)
270 continue;
271 extractBits(rewriter, value, selectors);
272 }
273
274 assert(totalUnknownBits == static_cast<int64_t>(selectors.size()) &&
275 "number of selectors must match");
276 auto muxed = constructMuxTree(rewriter, loc, selectors, emulatedResults,
277 getConstant(APInt::getZero(width)));
278
279 replaceOpAndCopyNamehint(rewriter, op, muxed);
280 return success();
281}
282
283//===----------------------------------------------------------------------===//
284// Conversion patterns
285//===----------------------------------------------------------------------===//
286
287namespace {
288
289/// Lower a comb::AndOp operation to synth::aig::AndInverterOp
290struct CombAndOpConversion : OpConversionPattern<AndOp> {
292
293 LogicalResult
294 matchAndRewrite(AndOp op, OpAdaptor adaptor,
295 ConversionPatternRewriter &rewriter) const override {
296 SmallVector<bool> nonInverts(adaptor.getInputs().size(), false);
297 replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
298 rewriter, op, adaptor.getInputs(), nonInverts);
299 return success();
300 }
301};
302
303/// Lower a comb::OrOp operation to synth::aig::AndInverterOp with invert flags
304struct CombOrToAIGConversion : OpConversionPattern<OrOp> {
306
307 LogicalResult
308 matchAndRewrite(OrOp op, OpAdaptor adaptor,
309 ConversionPatternRewriter &rewriter) const override {
310 // Implement Or using And and invert flags: a | b = ~(~a & ~b)
311 SmallVector<bool> allInverts(adaptor.getInputs().size(), true);
312 auto andOp = synth::aig::AndInverterOp::create(
313 rewriter, op.getLoc(), adaptor.getInputs(), allInverts);
314 replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
315 rewriter, op, andOp,
316 /*invert=*/true);
317 return success();
318 }
319};
320
321struct CombOrToMIGConversion : OpConversionPattern<OrOp> {
322  using OpConversionPattern<OrOp>::OpConversionPattern;
323  LogicalResult
324 matchAndRewrite(OrOp op, OpAdaptor adaptor,
325 ConversionPatternRewriter &rewriter) const override {
326 if (op.getNumOperands() != 2)
327 return failure();
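    // a | b == maj(a, b, 1), so append an all-ones constant as the third
    // majority input.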
328 SmallVector<Value, 3> inputs(adaptor.getInputs());
329 auto one = hw::ConstantOp::create(
330 rewriter, op.getLoc(),
331 APInt::getAllOnes(hw::getBitWidth(op.getType())));
332 inputs.push_back(one);
333 std::array<bool, 3> inverts = {false, false, false};
334 replaceOpWithNewOpAndCopyNamehint<synth::mig::MajorityInverterOp>(
335 rewriter, op, inputs, inverts);
336 return success();
337 }
338};
339
340struct AndInverterToMIGConversion
341 : OpConversionPattern<synth::aig::AndInverterOp> {
342 using OpConversionPattern<synth::aig::AndInverterOp>::OpConversionPattern;
343 LogicalResult
344 matchAndRewrite(synth::aig::AndInverterOp op, OpAdaptor adaptor,
345 ConversionPatternRewriter &rewriter) const override {
346 if (op.getNumOperands() > 2)
347 return failure();
348 if (op.getNumOperands() == 1) {
349 SmallVector<bool, 1> inverts{op.getInverted()[0]};
350 replaceOpWithNewOpAndCopyNamehint<synth::mig::MajorityInverterOp>(
351 rewriter, op, adaptor.getInputs(), inverts);
352 return success();
353 }
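    // A two-operand and_inv is maj(in0, in1, 0) with the same inverts, so
    // append a constant zero as the third majority input.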
354 SmallVector<Value, 3> inputs(adaptor.getInputs());
355    auto zero = hw::ConstantOp::create(
356        rewriter, op.getLoc(), APInt::getZero(hw::getBitWidth(op.getType())));
357    inputs.push_back(zero);
358 SmallVector<bool, 3> inverts(adaptor.getInverted());
359 inverts.push_back(false);
360 replaceOpWithNewOpAndCopyNamehint<synth::mig::MajorityInverterOp>(
361 rewriter, op, inputs, inverts);
362 return success();
363 }
364};
365
366/// Lower a comb::XorOp operation to AIG operations
367struct CombXorOpConversion : OpConversionPattern<XorOp> {
368  using OpConversionPattern<XorOp>::OpConversionPattern;
369
370 LogicalResult
371 matchAndRewrite(XorOp op, OpAdaptor adaptor,
372 ConversionPatternRewriter &rewriter) const override {
373 if (op.getNumOperands() != 2)
374 return failure();
375 // Xor using And with invert flags: a ^ b = (a | b) & (~a | ~b)
376
377 // (a | b) = ~(~a & ~b)
378 // (~a | ~b) = ~(a & b)
379 auto inputs = adaptor.getInputs();
380 SmallVector<bool> allInverts(inputs.size(), true);
381 SmallVector<bool> allNotInverts(inputs.size(), false);
382
383 auto notAAndNotB = synth::aig::AndInverterOp::create(rewriter, op.getLoc(),
384 inputs, allInverts);
385 auto aAndB = synth::aig::AndInverterOp::create(rewriter, op.getLoc(),
386 inputs, allNotInverts);
387
388 replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
389 rewriter, op, notAAndNotB, aAndB,
390 /*lhs_invert=*/true,
391 /*rhs_invert=*/true);
392 return success();
393 }
394};
395
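// Lower a variadic fully-associative operation into a balanced binary tree,
// e.g. op(a, b, c, d) -> op(op(a, b), op(c, d)), by recursively splitting the
// operand list in half.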
396template <typename OpTy>
397struct CombLowerVariadicOp : OpConversionPattern<OpTy> {
398  using OpConversionPattern<OpTy>::OpConversionPattern;
399  using OpAdaptor = typename OpConversionPattern<OpTy>::OpAdaptor;
400 LogicalResult
401 matchAndRewrite(OpTy op, OpAdaptor adaptor,
402 ConversionPatternRewriter &rewriter) const override {
403 auto result = lowerFullyAssociativeOp(op, op.getOperands(), rewriter);
404 replaceOpAndCopyNamehint(rewriter, op, result);
405 return success();
406 }
407
408 static Value lowerFullyAssociativeOp(OpTy op, OperandRange operands,
409 ConversionPatternRewriter &rewriter) {
410 Value lhs, rhs;
411 switch (operands.size()) {
412 case 0:
413 llvm_unreachable("cannot be called with empty operand range");
414 break;
415 case 1:
416 return operands[0];
417 case 2:
418 lhs = operands[0];
419 rhs = operands[1];
420 return OpTy::create(rewriter, op.getLoc(), ValueRange{lhs, rhs}, true);
421 default:
422 auto firstHalf = operands.size() / 2;
423 lhs =
424 lowerFullyAssociativeOp(op, operands.take_front(firstHalf), rewriter);
425 rhs =
426 lowerFullyAssociativeOp(op, operands.drop_front(firstHalf), rewriter);
427 return OpTy::create(rewriter, op.getLoc(), ValueRange{lhs, rhs}, true);
428 }
429 }
430};
431
432// Lower comb::MuxOp to AIG operations.
433struct CombMuxOpConversion : OpConversionPattern<MuxOp> {
434  using OpConversionPattern<MuxOp>::OpConversionPattern;
435
436 LogicalResult
437 matchAndRewrite(MuxOp op, OpAdaptor adaptor,
438 ConversionPatternRewriter &rewriter) const override {
439 Value cond = op.getCond();
440 auto trueVal = op.getTrueValue();
441 auto falseVal = op.getFalseValue();
442
443 if (!op.getType().isInteger()) {
444 // If the type of the mux is not integer, bitcast the operands first.
445 auto widthType = rewriter.getIntegerType(hw::getBitWidth(op.getType()));
446 trueVal =
447 hw::BitcastOp::create(rewriter, op->getLoc(), widthType, trueVal);
448 falseVal =
449 hw::BitcastOp::create(rewriter, op->getLoc(), widthType, falseVal);
450 }
451
452 // Replicate condition if needed
453 if (!trueVal.getType().isInteger(1))
454 cond = comb::ReplicateOp::create(rewriter, op.getLoc(), trueVal.getType(),
455 cond);
456
457 // c ? a : b => (replicate(c) & a) | (~replicate(c) & b)
458 auto lhs =
459 synth::aig::AndInverterOp::create(rewriter, op.getLoc(), cond, trueVal);
460 auto rhs = synth::aig::AndInverterOp::create(rewriter, op.getLoc(), cond,
461 falseVal, true, false);
462
463 Value result = comb::OrOp::create(rewriter, op.getLoc(), lhs, rhs);
464 // Insert the bitcast if the type of the mux is not integer.
465 if (result.getType() != op.getType())
466 result =
467 hw::BitcastOp::create(rewriter, op.getLoc(), op.getType(), result);
468 replaceOpAndCopyNamehint(rewriter, op, result);
469 return success();
470 }
471};
472
473//===----------------------------------------------------------------------===//
474// Adder Architecture Selection
475//===----------------------------------------------------------------------===//
476
477enum AdderArchitecture { RippleCarry, Sklanskey, KoggeStone, BrentKung };
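// Select the adder architecture. For testing, the choice can be forced with a
// string attribute on the op, e.g. {synth.test.arch = "KOGGE-STONE"};
// otherwise a width-based heuristic is used.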
478AdderArchitecture determineAdderArch(Operation *op, int64_t width) {
479 auto strAttr = op->getAttrOfType<StringAttr>("synth.test.arch");
480 if (strAttr) {
481 return llvm::StringSwitch<AdderArchitecture>(strAttr.getValue())
482 .Case("SKLANSKEY", Sklanskey)
483 .Case("KOGGE-STONE", KoggeStone)
484 .Case("BRENT-KUNG", BrentKung)
485 .Case("RIPPLE-CARRY", RippleCarry);
486 }
487 // Determine using width as a heuristic.
488 // TODO: Perform a more thorough analysis to motivate the choices or
489 // implement an adder synthesis algorithm to construct an optimal adder
490 // under the given timing constraints - see the work of Zimmermann
491
492  // For very small adders, the overhead of a parallel prefix adder is likely not
493 // worth it.
494 if (width < 8)
495 return AdderArchitecture::RippleCarry;
496
497 // Sklanskey is a good compromise for high-performance, but has high fanout
498 // which may lead to wiring congestion for very large adders.
499 if (width <= 32)
500 return AdderArchitecture::Sklanskey;
501
502  // Kogge-Stone uses more area than Sklanskey but has lower fanout, and thus
503 // may be preferable for larger adders.
504 return AdderArchitecture::KoggeStone;
505}
506
507//===----------------------------------------------------------------------===//
508// Parallel Prefix Tree
509//===----------------------------------------------------------------------===//
510
511// Implement the Kogge-Stone parallel prefix tree
512// Described in https://en.wikipedia.org/wiki/Kogge%E2%80%93Stone_adder
513// Slightly better delay than Brent-Kung, but more area.
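// For width = 4: stage 1 (stride 1) combines each bit i with bit i-1, e.g.
// g3' = g3 | (p3 & g2); stage 2 (stride 2) combines with bit i-2 of the
// previous stage, e.g. g3'' = g3' | (p3' & g1'). After ceil(log2(width))
// stages, gPrefix[i] covers bits [0, i].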
514void lowerKoggeStonePrefixTree(OpBuilder &builder, Location loc,
515 SmallVector<Value> &pPrefix,
516 SmallVector<Value> &gPrefix) {
517
518 auto width = static_cast<int64_t>(pPrefix.size());
519 assert(width == static_cast<int64_t>(gPrefix.size()));
520 SmallVector<Value> pPrefixNew = pPrefix;
521 SmallVector<Value> gPrefixNew = gPrefix;
522
523 // Kogge-Stone parallel prefix computation
524 for (int64_t stride = 1; stride < width; stride *= 2) {
525
526 for (int64_t i = stride; i < width; ++i) {
527 int64_t j = i - stride;
528
529 // Group generate: g_i OR (p_i AND g_j)
530 Value andPG = comb::AndOp::create(builder, loc, pPrefix[i], gPrefix[j]);
531 gPrefixNew[i] = comb::OrOp::create(builder, loc, gPrefix[i], andPG);
532
533 // Group propagate: p_i AND p_j
534 pPrefixNew[i] = comb::AndOp::create(builder, loc, pPrefix[i], pPrefix[j]);
535 }
536
537 pPrefix = pPrefixNew;
538 gPrefix = gPrefixNew;
539 }
540
541 LLVM_DEBUG({
542 int64_t stage = 0;
543 for (int64_t stride = 1; stride < width; stride *= 2) {
544 llvm::dbgs()
545 << "--------------------------------------- Kogge-Stone Stage "
546 << stage << "\n";
547 for (int64_t i = stride; i < width; ++i) {
548 int64_t j = i - stride;
549 // Group generate: g_i OR (p_i AND g_j)
550 llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
551 << " OR (P" << i << stage << " AND G" << j << stage
552 << ")\n";
553
554 // Group propagate: p_i AND p_j
555 llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
556 << " AND P" << j << stage << "\n";
557 }
558 ++stage;
559 }
560 });
561}
562
563// Implement the Sklansky parallel prefix tree
564// High fan-out, low depth, low area
565void lowerSklanskeyPrefixTree(OpBuilder &builder, Location loc,
566 SmallVector<Value> &pPrefix,
567 SmallVector<Value> &gPrefix) {
568 auto width = static_cast<int64_t>(pPrefix.size());
569 assert(width == static_cast<int64_t>(gPrefix.size()));
570 SmallVector<Value> pPrefixNew = pPrefix;
571 SmallVector<Value> gPrefixNew = gPrefix;
572 for (int64_t stride = 1; stride < width; stride *= 2) {
573 for (int64_t i = stride; i < width; i += 2 * stride) {
574 for (int64_t k = 0; k < stride && i + k < width; ++k) {
575 int64_t idx = i + k;
576 int64_t j = i - 1;
577
578 // Group generate: g_idx OR (p_idx AND g_j)
579 Value andPG =
580 comb::AndOp::create(builder, loc, pPrefix[idx], gPrefix[j]);
581 gPrefixNew[idx] = comb::OrOp::create(builder, loc, gPrefix[idx], andPG);
582
583 // Group propagate: p_idx AND p_j
584 pPrefixNew[idx] =
585 comb::AndOp::create(builder, loc, pPrefix[idx], pPrefix[j]);
586 }
587 }
588
589 pPrefix = pPrefixNew;
590 gPrefix = gPrefixNew;
591 }
592
593 LLVM_DEBUG({
594 int64_t stage = 0;
595 for (int64_t stride = 1; stride < width; stride *= 2) {
596 llvm::dbgs() << "--------------------------------------- Sklanskey Stage "
597 << stage << "\n";
598 for (int64_t i = stride; i < width; i += 2 * stride) {
599 for (int64_t k = 0; k < stride && i + k < width; ++k) {
600 int64_t idx = i + k;
601 int64_t j = i - 1;
602 // Group generate: g_i OR (p_i AND g_j)
603 llvm::dbgs() << "G" << idx << stage + 1 << " = G" << idx << stage
604 << " OR (P" << idx << stage << " AND G" << j << stage
605 << ")\n";
606
607 // Group propagate: p_i AND p_j
608 llvm::dbgs() << "P" << idx << stage + 1 << " = P" << idx << stage
609 << " AND P" << j << stage << "\n";
610 }
611 }
612 ++stage;
613 }
614 });
615}
616
617// Implement the Brent-Kung parallel prefix tree
618// Described in https://en.wikipedia.org/wiki/Brent%E2%80%93Kung_adder
619// Slightly worse delay than Kogge-Stone, but less area.
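// For width = 8 the forward phase combines indices {1, 3, 5, 7}, then {3, 7},
// then {7}; the backward phase then fills in the remaining prefixes at index
// {5} (stride 2) and indices {2, 4, 6} (stride 1).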
620void lowerBrentKungPrefixTree(OpBuilder &builder, Location loc,
621 SmallVector<Value> &pPrefix,
622 SmallVector<Value> &gPrefix) {
623 auto width = static_cast<int64_t>(pPrefix.size());
624 assert(width == static_cast<int64_t>(gPrefix.size()));
625 SmallVector<Value> pPrefixNew = pPrefix;
626 SmallVector<Value> gPrefixNew = gPrefix;
627 // Brent-Kung parallel prefix computation
628 // Forward phase
629 int64_t stride;
630 for (stride = 1; stride < width; stride *= 2) {
631 for (int64_t i = stride * 2 - 1; i < width; i += stride * 2) {
632 int64_t j = i - stride;
633
634 // Group generate: g_i OR (p_i AND g_j)
635 Value andPG = comb::AndOp::create(builder, loc, pPrefix[i], gPrefix[j]);
636 gPrefixNew[i] = comb::OrOp::create(builder, loc, gPrefix[i], andPG);
637
638 // Group propagate: p_i AND p_j
639 pPrefixNew[i] = comb::AndOp::create(builder, loc, pPrefix[i], pPrefix[j]);
640 }
641 pPrefix = pPrefixNew;
642 gPrefix = gPrefixNew;
643 }
644
645 // Backward phase
646 for (; stride > 0; stride /= 2) {
647 for (int64_t i = stride * 3 - 1; i < width; i += stride * 2) {
648 int64_t j = i - stride;
649
650 // Group generate: g_i OR (p_i AND g_j)
651 Value andPG = comb::AndOp::create(builder, loc, pPrefix[i], gPrefix[j]);
652 gPrefixNew[i] = comb::OrOp::create(builder, loc, gPrefix[i], andPG);
653
654 // Group propagate: p_i AND p_j
655 pPrefixNew[i] = comb::AndOp::create(builder, loc, pPrefix[i], pPrefix[j]);
656 }
657 pPrefix = pPrefixNew;
658 gPrefix = gPrefixNew;
659 }
660
661 LLVM_DEBUG({
662 int64_t stage = 0;
663 for (stride = 1; stride < width; stride *= 2) {
664 llvm::dbgs() << "--------------------------------------- Brent-Kung FW "
665 << stage << " : Stride " << stride << "\n";
666 for (int64_t i = stride * 2 - 1; i < width; i += stride * 2) {
667 int64_t j = i - stride;
668
669 // Group generate: g_i OR (p_i AND g_j)
670 llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
671 << " OR (P" << i << stage << " AND G" << j << stage
672 << ")\n";
673
674 // Group propagate: p_i AND p_j
675 llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
676 << " AND P" << j << stage << "\n";
677 }
678 ++stage;
679 }
680
681 for (; stride > 0; stride /= 2) {
682 if (stride * 3 - 1 < width)
683 llvm::dbgs() << "--------------------------------------- Brent-Kung BW "
684 << stage << " : Stride " << stride << "\n";
685
686 for (int64_t i = stride * 3 - 1; i < width; i += stride * 2) {
687 int64_t j = i - stride;
688
689 // Group generate: g_i OR (p_i AND g_j)
690 llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
691 << " OR (P" << i << stage << " AND G" << j << stage
692 << ")\n";
693
694 // Group propagate: p_i AND p_j
695 llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
696 << " AND P" << j << stage << "\n";
697 }
698 --stage;
699 }
700 });
701}
702
703// TODO: Generalize to other parallel prefix trees.
704class LazyKoggeStonePrefixTree {
705public:
706 LazyKoggeStonePrefixTree(OpBuilder &builder, Location loc, int64_t width,
707 ArrayRef<Value> pPrefix, ArrayRef<Value> gPrefix)
708 : builder(builder), loc(loc), width(width) {
709 assert(width > 0 && "width must be positive");
710 for (int64_t i = 0; i < width; ++i)
711 prefixCache[{0, i}] = {pPrefix[i], gPrefix[i]};
712 }
713
714 // Get the final group and propagate values for bit i.
715 std::pair<Value, Value> getFinal(int64_t i) {
716 assert(i >= 0 && i < width && "i out of bounds");
717 // Final level is ceil(log2(width)) in Kogge-Stone.
718 return getGroupAndPropagate(llvm::Log2_64_Ceil(width), i);
719 }
720
721private:
722 // Recursively get the group and propagate values for bit i at level `level`.
723 // Level 0 is the initial level with the input propagate and generate values.
724 // Level n computes the group and propagate values for a stride of 2^(n-1).
725 // Uses memoization to cache intermediate results.
726 std::pair<Value, Value> getGroupAndPropagate(int64_t level, int64_t i);
727 OpBuilder &builder;
728 Location loc;
729 int64_t width;
730 DenseMap<std::pair<int64_t, int64_t>, std::pair<Value, Value>> prefixCache;
731};
732
733std::pair<Value, Value>
734LazyKoggeStonePrefixTree::getGroupAndPropagate(int64_t level, int64_t i) {
735 assert(i < width && "i out of bounds");
736 auto key = std::make_pair(level, i);
737 auto it = prefixCache.find(key);
738 if (it != prefixCache.end())
739 return it->second;
740
741 assert(level > 0 && "If the level is 0, we should have hit the cache");
742
743 int64_t previousStride = 1ULL << (level - 1);
744 if (i < previousStride) {
745 // No dependency, just copy from the previous level.
746 auto [propagateI, generateI] = getGroupAndPropagate(level - 1, i);
747 prefixCache[key] = {propagateI, generateI};
748 return prefixCache[key];
749 }
750 // Get the dependency index.
751 int64_t j = i - previousStride;
752 auto [propagateI, generateI] = getGroupAndPropagate(level - 1, i);
753 auto [propagateJ, generateJ] = getGroupAndPropagate(level - 1, j);
754 // Group generate: g_i OR (p_i AND g_j)
755 Value andPG = comb::AndOp::create(builder, loc, propagateI, generateJ);
756 Value newGenerate = comb::OrOp::create(builder, loc, generateI, andPG);
757 // Group propagate: p_i AND p_j
758 Value newPropagate =
759 comb::AndOp::create(builder, loc, propagateI, propagateJ);
760 prefixCache[key] = {newPropagate, newGenerate};
761 return prefixCache[key];
762}
763
764template <bool lowerToMIG>
765struct CombAddOpConversion : OpConversionPattern<AddOp> {
766  using OpConversionPattern<AddOp>::OpConversionPattern;
767
768 LogicalResult
769 matchAndRewrite(AddOp op, OpAdaptor adaptor,
770 ConversionPatternRewriter &rewriter) const override {
771 auto inputs = adaptor.getInputs();
772 // Lower only when there are two inputs.
773 // Variadic operands must be lowered in a different pattern.
774 if (inputs.size() != 2)
775 return failure();
776
777 auto width = op.getType().getIntOrFloatBitWidth();
778 // Skip a zero width value.
779 if (width == 0) {
780 replaceOpWithNewOpAndCopyNamehint<hw::ConstantOp>(rewriter, op,
781 op.getType(), 0);
782 return success();
783 }
784
785 // Check if the architecture is specified by an attribute.
786 auto arch = determineAdderArch(op, width);
787 if (arch == AdderArchitecture::RippleCarry)
788 return lowerRippleCarryAdder(op, inputs, rewriter);
789 return lowerParallelPrefixAdder(op, inputs, rewriter);
790 }
791
792 // Implement a basic ripple-carry adder for small bitwidths.
793 LogicalResult
794 lowerRippleCarryAdder(comb::AddOp op, ValueRange inputs,
795 ConversionPatternRewriter &rewriter) const {
796 auto width = op.getType().getIntOrFloatBitWidth();
797 // Implement a naive Ripple-carry full adder.
798 Value carry;
799
800 auto aBits = extractBits(rewriter, inputs[0]);
801 auto bBits = extractBits(rewriter, inputs[1]);
802 SmallVector<Value> results;
803 results.resize(width);
804 for (int64_t i = 0; i < width; ++i) {
805 SmallVector<Value> xorOperands = {aBits[i], bBits[i]};
806 if (carry)
807 xorOperands.push_back(carry);
808
809 // sum[i] = xor(carry[i-1], a[i], b[i])
810 // NOTE: The result is stored in reverse order.
811 results[width - i - 1] =
812 comb::XorOp::create(rewriter, op.getLoc(), xorOperands, true);
813
814 // If this is the last bit, we are done.
815 if (i == width - 1)
816 break;
817
818 // carry[i] = (carry[i-1] & (a[i] ^ b[i])) | (a[i] & b[i])
819 if (!carry) {
821        // This is the first bit: there is no incoming carry, so the carry out is just a[i] & b[i].
821 carry = comb::AndOp::create(rewriter, op.getLoc(),
822 ValueRange{aBits[i], bBits[i]}, true);
823 continue;
824 }
825
826 carry = createMajorityFunction(rewriter, op.getLoc(), aBits[i], bBits[i],
827 carry, lowerToMIG);
828 }
829 LLVM_DEBUG(llvm::dbgs() << "Lower comb.add to Ripple-Carry Adder of width "
830 << width << "\n");
831
832 replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(rewriter, op, results);
833 return success();
834 }
835
836  // Implement a parallel prefix adder using a Sklanskey, Kogge-Stone, or
837  // Brent-Kung tree. This will introduce unused signals for the carry bits,
838  // but these will be removed by the AIG pass.
839 LogicalResult
840 lowerParallelPrefixAdder(comb::AddOp op, ValueRange inputs,
841 ConversionPatternRewriter &rewriter) const {
842 auto width = op.getType().getIntOrFloatBitWidth();
843
844 auto aBits = extractBits(rewriter, inputs[0]);
845 auto bBits = extractBits(rewriter, inputs[1]);
846
847 // Construct propagate (p) and generate (g) signals
848 SmallVector<Value> p, g;
849 p.reserve(width);
850 g.reserve(width);
851
852 for (auto [aBit, bBit] : llvm::zip(aBits, bBits)) {
853 // p_i = a_i XOR b_i
854 p.push_back(comb::XorOp::create(rewriter, op.getLoc(), aBit, bBit));
855 // g_i = a_i AND b_i
856 g.push_back(comb::AndOp::create(rewriter, op.getLoc(), aBit, bBit));
857 }
858
859 LLVM_DEBUG({
860 llvm::dbgs() << "Lower comb.add to Parallel-Prefix of width " << width
861 << "\n--------------------------------------- Init\n";
862
863 for (int64_t i = 0; i < width; ++i) {
864 // p_i = a_i XOR b_i
865 llvm::dbgs() << "P0" << i << " = A" << i << " XOR B" << i << "\n";
866 // g_i = a_i AND b_i
867 llvm::dbgs() << "G0" << i << " = A" << i << " AND B" << i << "\n";
868 }
869 });
870
871 // Create copies of p and g for the prefix computation
872 SmallVector<Value> pPrefix = p;
873 SmallVector<Value> gPrefix = g;
874
875 // Check if the architecture is specified by an attribute.
876 auto arch = determineAdderArch(op, width);
877
878 switch (arch) {
879 case AdderArchitecture::RippleCarry:
880 llvm_unreachable("Ripple-Carry should be handled separately");
881 break;
882 case AdderArchitecture::Sklanskey:
883 lowerSklanskeyPrefixTree(rewriter, op.getLoc(), pPrefix, gPrefix);
884 break;
885 case AdderArchitecture::KoggeStone:
886 lowerKoggeStonePrefixTree(rewriter, op.getLoc(), pPrefix, gPrefix);
887 break;
888 case AdderArchitecture::BrentKung:
889 lowerBrentKungPrefixTree(rewriter, op.getLoc(), pPrefix, gPrefix);
890 break;
891 }
892
893 // Generate result sum bits
894 // NOTE: The result is stored in reverse order.
895 SmallVector<Value> results;
896 results.resize(width);
897 // Sum bit 0 is just p[0] since carry_in = 0
898 results[width - 1] = p[0];
899
900 // For remaining bits, sum_i = p_i XOR g_(i-1)
901 // The carry into position i is the group generate from position i-1
902 for (int64_t i = 1; i < width; ++i)
903 results[width - 1 - i] =
904 comb::XorOp::create(rewriter, op.getLoc(), p[i], gPrefix[i - 1]);
905
906 replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(rewriter, op, results);
907
908 LLVM_DEBUG({
909 llvm::dbgs() << "--------------------------------------- Completion\n"
910 << "RES0 = P0\n";
911 for (int64_t i = 1; i < width; ++i)
912 llvm::dbgs() << "RES" << i << " = P" << i << " XOR G" << i - 1 << "\n";
913 });
914
915 return success();
916 }
917};
918
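// Lower comb.mul by forming an AND-based partial product array and compressing
// it (Wallace-tree style) down to two addends, which are then summed with a
// carry-propagate comb.add that is lowered by the adder patterns above.
// For width = 3 the rows are (bit 0 first):
//   row0 = {a0&b0, a1&b0, a2&b0}
//   row1 = {    0, a0&b1, a1&b1}
//   row2 = {    0,     0, a0&b2}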
919struct CombMulOpConversion : OpConversionPattern<MulOp> {
920  using OpConversionPattern<MulOp>::OpConversionPattern;
921  using OpAdaptor = typename OpConversionPattern<MulOp>::OpAdaptor;
922 LogicalResult
923 matchAndRewrite(MulOp op, OpAdaptor adaptor,
924 ConversionPatternRewriter &rewriter) const override {
925 if (adaptor.getInputs().size() != 2)
926 return failure();
927
928 Location loc = op.getLoc();
929 Value a = adaptor.getInputs()[0];
930 Value b = adaptor.getInputs()[1];
931 unsigned width = op.getType().getIntOrFloatBitWidth();
932
933 // Skip a zero width value.
934 if (width == 0) {
935 rewriter.replaceOpWithNewOp<hw::ConstantOp>(op, op.getType(), 0);
936 return success();
937 }
938
939 // Extract individual bits from operands
940 SmallVector<Value> aBits = extractBits(rewriter, a);
941 SmallVector<Value> bBits = extractBits(rewriter, b);
942
943 auto falseValue = hw::ConstantOp::create(rewriter, loc, APInt(1, 0));
944
945 // Generate partial products
946 SmallVector<SmallVector<Value>> partialProducts;
947 partialProducts.reserve(width);
948 for (unsigned i = 0; i < width; ++i) {
949 SmallVector<Value> row(i, falseValue);
950 row.reserve(width);
951 // Generate partial product bits
952 for (unsigned j = 0; i + j < width; ++j)
953 row.push_back(
954 rewriter.createOrFold<comb::AndOp>(loc, aBits[j], bBits[i]));
955
956 partialProducts.push_back(row);
957 }
958
959 // If the width is 1, we are done.
960 if (width == 1) {
961 rewriter.replaceOp(op, partialProducts[0][0]);
962 return success();
963 }
964
965 // Wallace tree reduction - reduce to two addends.
966 datapath::CompressorTree comp(width, partialProducts, loc);
967 auto addends = comp.compressToHeight(rewriter, 2);
968
969 // Sum the two addends using a carry-propagate adder
970 auto newAdd = comb::AddOp::create(rewriter, loc, addends, true);
971 replaceOpAndCopyNamehint(rewriter, op, newAdd);
972 return success();
973 }
974};
975
976template <typename OpTy>
977struct DivModOpConversionBase : OpConversionPattern<OpTy> {
978 DivModOpConversionBase(MLIRContext *context, int64_t maxEmulationUnknownBits)
979 : OpConversionPattern<OpTy>(context),
980 maxEmulationUnknownBits(maxEmulationUnknownBits) {
981 assert(maxEmulationUnknownBits < 32 &&
982 "maxEmulationUnknownBits must be less than 32");
983 }
984 const int64_t maxEmulationUnknownBits;
985};
986
987struct CombDivUOpConversion : DivModOpConversionBase<DivUOp> {
988 using DivModOpConversionBase<DivUOp>::DivModOpConversionBase;
989 LogicalResult
990 matchAndRewrite(DivUOp op, OpAdaptor adaptor,
991 ConversionPatternRewriter &rewriter) const override {
992 // Check if the divisor is a power of two.
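    // For a power-of-two divisor 2^k, lhs /u 2^k is a logical right shift by
    // k: take the upper (width - k) bits of lhs and zero-extend them back to
    // width.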
993 if (auto rhsConstantOp = adaptor.getRhs().getDefiningOp<hw::ConstantOp>())
994 if (rhsConstantOp.getValue().isPowerOf2()) {
995 // Extract upper bits.
996 size_t extractAmount = rhsConstantOp.getValue().ceilLogBase2();
997 size_t width = op.getType().getIntOrFloatBitWidth();
998 Value upperBits = rewriter.createOrFold<comb::ExtractOp>(
999 op.getLoc(), adaptor.getLhs(), extractAmount,
1000 width - extractAmount);
1001 Value constZero = hw::ConstantOp::create(rewriter, op.getLoc(),
1002 APInt::getZero(extractAmount));
1003 replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(
1004 rewriter, op, op.getType(), ArrayRef<Value>{constZero, upperBits});
1005 return success();
1006 }
1007
1008    // When rhs is not a power of two and the number of unknown bits is small,
1009 // create a mux tree that emulates all possible cases.
1010    return emulateBinaryOpForUnknownBits(
1011        rewriter, maxEmulationUnknownBits, op,
1012 [](const APInt &lhs, const APInt &rhs) {
1013 // Division by zero is undefined, just return zero.
1014 if (rhs.isZero())
1015 return APInt::getZero(rhs.getBitWidth());
1016 return lhs.udiv(rhs);
1017 });
1018 }
1019};
1020
1021struct CombModUOpConversion : DivModOpConversionBase<ModUOp> {
1022 using DivModOpConversionBase<ModUOp>::DivModOpConversionBase;
1023 LogicalResult
1024 matchAndRewrite(ModUOp op, OpAdaptor adaptor,
1025 ConversionPatternRewriter &rewriter) const override {
1026 // Check if the divisor is a power of two.
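    // For a power-of-two divisor 2^k, lhs %u 2^k keeps the low k bits of lhs,
    // zero-extended back to width.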
1027 if (auto rhsConstantOp = adaptor.getRhs().getDefiningOp<hw::ConstantOp>())
1028 if (rhsConstantOp.getValue().isPowerOf2()) {
1029 // Extract lower bits.
1030 size_t extractAmount = rhsConstantOp.getValue().ceilLogBase2();
1031 size_t width = op.getType().getIntOrFloatBitWidth();
1032 Value lowerBits = rewriter.createOrFold<comb::ExtractOp>(
1033 op.getLoc(), adaptor.getLhs(), 0, extractAmount);
1034 Value constZero = hw::ConstantOp::create(
1035 rewriter, op.getLoc(), APInt::getZero(width - extractAmount));
1036 replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(
1037 rewriter, op, op.getType(), ArrayRef<Value>{constZero, lowerBits});
1038 return success();
1039 }
1040
1041    // When rhs is not a power of two and the number of unknown bits is small,
1042 // create a mux tree that emulates all possible cases.
1043    return emulateBinaryOpForUnknownBits(
1044        rewriter, maxEmulationUnknownBits, op,
1045 [](const APInt &lhs, const APInt &rhs) {
1046 // Division by zero is undefined, just return zero.
1047 if (rhs.isZero())
1048 return APInt::getZero(rhs.getBitWidth());
1049 return lhs.urem(rhs);
1050 });
1051 }
1052};
1053
1054struct CombDivSOpConversion : DivModOpConversionBase<DivSOp> {
1055 using DivModOpConversionBase<DivSOp>::DivModOpConversionBase;
1056
1057 LogicalResult
1058 matchAndRewrite(DivSOp op, OpAdaptor adaptor,
1059 ConversionPatternRewriter &rewriter) const override {
1060 // Currently only lower with emulation.
1061 // TODO: Implement a signed division lowering at least for power of two.
1062    return emulateBinaryOpForUnknownBits(
1063        rewriter, maxEmulationUnknownBits, op,
1064 [](const APInt &lhs, const APInt &rhs) {
1065 // Division by zero is undefined, just return zero.
1066 if (rhs.isZero())
1067 return APInt::getZero(rhs.getBitWidth());
1068 return lhs.sdiv(rhs);
1069 });
1070 }
1071};
1072
1073struct CombModSOpConversion : DivModOpConversionBase<ModSOp> {
1074 using DivModOpConversionBase<ModSOp>::DivModOpConversionBase;
1075 LogicalResult
1076 matchAndRewrite(ModSOp op, OpAdaptor adaptor,
1077 ConversionPatternRewriter &rewriter) const override {
1078 // Currently only lower with emulation.
1079 // TODO: Implement a signed modulus lowering at least for power of two.
1080    return emulateBinaryOpForUnknownBits(
1081        rewriter, maxEmulationUnknownBits, op,
1082 [](const APInt &lhs, const APInt &rhs) {
1083 // Division by zero is undefined, just return zero.
1084 if (rhs.isZero())
1085 return APInt::getZero(rhs.getBitWidth());
1086 return lhs.srem(rhs);
1087 });
1088 }
1089};
1090
1091struct CombICmpOpConversion : OpConversionPattern<ICmpOp> {
1092  using OpConversionPattern<ICmpOp>::OpConversionPattern;
1093
1094 // Simple comparator for small bit widths
1095 static Value constructRippleCarry(Location loc, Value a, Value b,
1096 bool includeEq,
1097 ConversionPatternRewriter &rewriter) {
1098 // Construct following unsigned comparison expressions.
1099 // a <= b ==> (~a[n] & b[n]) | (a[n] == b[n] & a[n-1:0] <= b[n-1:0])
1100 // a < b ==> (~a[n] & b[n]) | (a[n] == b[n] & a[n-1:0] < b[n-1:0])
1101 auto aBits = extractBits(rewriter, a);
1102 auto bBits = extractBits(rewriter, b);
1103 Value acc = hw::ConstantOp::create(rewriter, loc, APInt(1, includeEq));
1104
1105 for (auto [aBit, bBit] : llvm::zip(aBits, bBits)) {
1106 auto aBitXorBBit =
1107 rewriter.createOrFold<comb::XorOp>(loc, aBit, bBit, true);
1108 auto aEqualB = rewriter.createOrFold<synth::aig::AndInverterOp>(
1109 loc, aBitXorBBit, true);
1110 auto pred = rewriter.createOrFold<synth::aig::AndInverterOp>(
1111 loc, aBit, bBit, true, false);
1112
1113 auto aBitAndBBit = rewriter.createOrFold<comb::AndOp>(
1114 loc, ValueRange{aEqualB, acc}, true);
1115 acc = rewriter.createOrFold<comb::OrOp>(loc, pred, aBitAndBBit, true);
1116 }
1117 return acc;
1118 }
1119
1120 // Compute prefix comparison using parallel prefix algorithm
1121 // Note: This generates all intermediate prefix values even though we only
1122 // need the final result. Optimizing this to skip intermediate computations
1123 // is non-trivial because each iteration depends on results from previous
1124 // iterations. We rely on DCE passes to remove unused operations.
1125 // TODO: Lazily compute only the required prefix values. Kogge-Stone is
1126 // already implemented in a lazy manner below, but other architectures can
1127 // also be optimized.
1128 static Value computePrefixComparison(ConversionPatternRewriter &rewriter,
1129 Location loc, SmallVector<Value> pPrefix,
1130 SmallVector<Value> gPrefix,
1131 bool includeEq, AdderArchitecture arch) {
1132 auto width = pPrefix.size();
1133 Value finalGroup, finalPropagate;
1134 // Apply the appropriate prefix tree algorithm
1135 switch (arch) {
1136 case AdderArchitecture::RippleCarry:
1137 llvm_unreachable("Ripple-Carry should be handled separately");
1138 break;
1139 case AdderArchitecture::Sklanskey: {
1140 lowerSklanskeyPrefixTree(rewriter, loc, pPrefix, gPrefix);
1141 finalGroup = gPrefix[width - 1];
1142 finalPropagate = pPrefix[width - 1];
1143 break;
1144 }
1145 case AdderArchitecture::KoggeStone:
1146 // Use lazy Kogge-Stone implementation to avoid computing all
1147 // intermediate prefix values.
1148 std::tie(finalPropagate, finalGroup) =
1149 LazyKoggeStonePrefixTree(rewriter, loc, width, pPrefix, gPrefix)
1150 .getFinal(width - 1);
1151 break;
1152 case AdderArchitecture::BrentKung: {
1153 lowerBrentKungPrefixTree(rewriter, loc, pPrefix, gPrefix);
1154 finalGroup = gPrefix[width - 1];
1155 finalPropagate = pPrefix[width - 1];
1156 break;
1157 }
1158 }
1159
1160 // Final result: `finalGroup` gives us "a < b"
1161 if (includeEq) {
1162 // a <= b iff (a < b) OR (a == b)
1163 // a == b iff `finalPropagate` (all bits are equal)
1164 return comb::OrOp::create(rewriter, loc, finalGroup, finalPropagate);
1165 }
1166 // a < b iff `finalGroup`
1167 return finalGroup;
1168 }
1169
1170 // Construct an unsigned comparator using either ripple-carry or
1171  // parallel-prefix architecture. The comparison uses a parallel prefix tree as
1172  // an internal component, so the `AdderArchitecture` enum selects the architecture.
1173 static Value constructUnsignedCompare(Operation *op, Location loc, Value a,
1174 Value b, bool isLess, bool includeEq,
1175 ConversionPatternRewriter &rewriter) {
1176    // Normalize to a "less than" comparison by swapping operands if needed.
1177 if (!isLess)
1178 std::swap(a, b);
1179 auto width = a.getType().getIntOrFloatBitWidth();
1180
1181 // Check if the architecture is specified by an attribute.
1182 auto arch = determineAdderArch(op, width);
1183 if (arch == AdderArchitecture::RippleCarry)
1184 return constructRippleCarry(loc, a, b, includeEq, rewriter);
1185
1186 // For larger widths, use parallel prefix tree
1187 auto aBits = extractBits(rewriter, a);
1188 auto bBits = extractBits(rewriter, b);
1189
1190 // For comparison, we compute:
1191 // - Equal bits: eq_i = ~(a_i ^ b_i)
1192 // - Greater bits: gt_i = ~a_i & b_i (a_i < b_i)
1193 // - Propagate: p_i = eq_i (equality propagates)
1194 // - Generate: g_i = gt_i (greater-than generates)
1195 SmallVector<Value> eq, gt;
1196 eq.reserve(width);
1197 gt.reserve(width);
1198
1199 auto one =
1200 hw::ConstantOp::create(rewriter, loc, rewriter.getIntegerType(1), 1);
1201
1202 for (auto [aBit, bBit] : llvm::zip(aBits, bBits)) {
1203 // eq_i = ~(a_i ^ b_i) = a_i == b_i
1204 auto xorBit = comb::XorOp::create(rewriter, loc, aBit, bBit);
1205 eq.push_back(comb::XorOp::create(rewriter, loc, xorBit, one));
1206
1207 // gt_i = ~a_i & b_i = a_i < b_i
1208 auto notA = comb::XorOp::create(rewriter, loc, aBit, one);
1209 gt.push_back(comb::AndOp::create(rewriter, loc, notA, bBit));
1210 }
1211
1212 return computePrefixComparison(rewriter, loc, std::move(eq), std::move(gt),
1213 includeEq, arch);
1214 }
1215
1216 LogicalResult
1217 matchAndRewrite(ICmpOp op, OpAdaptor adaptor,
1218 ConversionPatternRewriter &rewriter) const override {
1219 auto lhs = adaptor.getLhs();
1220 auto rhs = adaptor.getRhs();
1221
1222 switch (op.getPredicate()) {
1223 default:
1224 return failure();
1225
1226 case ICmpPredicate::eq:
1227 case ICmpPredicate::ceq: {
1228 // a == b ==> ~(a[n] ^ b[n]) & ~(a[n-1] ^ b[n-1]) & ...
1229 auto xorOp = rewriter.createOrFold<comb::XorOp>(op.getLoc(), lhs, rhs);
1230 auto xorBits = extractBits(rewriter, xorOp);
1231 SmallVector<bool> allInverts(xorBits.size(), true);
1232 replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
1233 rewriter, op, xorBits, allInverts);
1234 return success();
1235 }
1236
1237 case ICmpPredicate::ne:
1238 case ICmpPredicate::cne: {
1239 // a != b ==> (a[n] ^ b[n]) | (a[n-1] ^ b[n-1]) | ...
1240 auto xorOp = rewriter.createOrFold<comb::XorOp>(op.getLoc(), lhs, rhs);
1241 replaceOpWithNewOpAndCopyNamehint<comb::OrOp>(
1242 rewriter, op, extractBits(rewriter, xorOp), true);
1243 return success();
1244 }
1245
1246 case ICmpPredicate::uge:
1247 case ICmpPredicate::ugt:
1248 case ICmpPredicate::ule:
1249 case ICmpPredicate::ult: {
1250 bool isLess = op.getPredicate() == ICmpPredicate::ult ||
1251 op.getPredicate() == ICmpPredicate::ule;
1252 bool includeEq = op.getPredicate() == ICmpPredicate::uge ||
1253 op.getPredicate() == ICmpPredicate::ule;
1254 replaceOpAndCopyNamehint(rewriter, op,
1255 constructUnsignedCompare(op, op.getLoc(), lhs,
1256 rhs, isLess, includeEq,
1257 rewriter));
1258 return success();
1259 }
1260 case ICmpPredicate::slt:
1261 case ICmpPredicate::sle:
1262 case ICmpPredicate::sgt:
1263 case ICmpPredicate::sge: {
1264 if (lhs.getType().getIntOrFloatBitWidth() == 0)
1265 return rewriter.notifyMatchFailure(
1266 op.getLoc(), "i0 signed comparison is unsupported");
1267 bool isLess = op.getPredicate() == ICmpPredicate::slt ||
1268 op.getPredicate() == ICmpPredicate::sle;
1269 bool includeEq = op.getPredicate() == ICmpPredicate::sge ||
1270 op.getPredicate() == ICmpPredicate::sle;
1271
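      // If the signs differ, the sign bit alone decides the result (for a
      // "less than" predicate the negative operand, i.e. signA == 1, wins).
      // If the signs are equal, comparing the remaining bits as unsigned
      // gives the signed ordering.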
1272 // Get a sign bit
1273 auto signA = extractMSB(rewriter, lhs);
1274 auto signB = extractMSB(rewriter, rhs);
1275 auto aRest = extractOtherThanMSB(rewriter, lhs);
1276 auto bRest = extractOtherThanMSB(rewriter, rhs);
1277
1278 // Compare magnitudes (all bits except sign)
1279 auto sameSignResult = constructUnsignedCompare(
1280 op, op.getLoc(), aRest, bRest, isLess, includeEq, rewriter);
1281
1282 // XOR of signs: true if signs are different
1283 auto signsDiffer =
1284 comb::XorOp::create(rewriter, op.getLoc(), signA, signB);
1285
1286 // Result when signs are different
1287 Value diffSignResult = isLess ? signA : signB;
1288
1289 // Final result: choose based on whether signs differ
1290 replaceOpWithNewOpAndCopyNamehint<comb::MuxOp>(
1291 rewriter, op, signsDiffer, diffSignResult, sameSignResult);
1292 return success();
1293 }
1294 }
1295 }
1296};
1297
1298struct CombParityOpConversion : OpConversionPattern<ParityOp> {
1299  using OpConversionPattern<ParityOp>::OpConversionPattern;
1300
1301 LogicalResult
1302 matchAndRewrite(ParityOp op, OpAdaptor adaptor,
1303 ConversionPatternRewriter &rewriter) const override {
1304 // Parity is the XOR of all bits.
1305 replaceOpWithNewOpAndCopyNamehint<comb::XorOp>(
1306 rewriter, op, extractBits(rewriter, adaptor.getInput()), true);
1307 return success();
1308 }
1309};
1310
1311struct CombShlOpConversion : OpConversionPattern<comb::ShlOp> {
1312  using OpConversionPattern<comb::ShlOp>::OpConversionPattern;
1313
1314 LogicalResult
1315 matchAndRewrite(comb::ShlOp op, OpAdaptor adaptor,
1316 ConversionPatternRewriter &rewriter) const override {
1317 auto width = op.getType().getIntOrFloatBitWidth();
1318 auto lhs = adaptor.getLhs();
1319 auto result = createShiftLogic</*isLeftShift=*/true>(
1320 rewriter, op.getLoc(), adaptor.getRhs(), width,
1321 /*getPadding=*/
1322 [&](int64_t index) {
1323 // Don't create zero width value.
1324 if (index == 0)
1325 return Value();
1326 // Padding is 0 for left shift.
1327 return rewriter.createOrFold<hw::ConstantOp>(
1328 op.getLoc(), rewriter.getIntegerType(index), 0);
1329 },
1330 /*getExtract=*/
1331 [&](int64_t index) {
1332 assert(index < width && "index out of bounds");
1333          // Extract the bits from the LSB.
1334 return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, 0,
1335 width - index);
1336 });
1337
1338 replaceOpAndCopyNamehint(rewriter, op, result);
1339 return success();
1340 }
1341};
1342
1343struct CombShrUOpConversion : OpConversionPattern<comb::ShrUOp> {
1344  using OpConversionPattern<comb::ShrUOp>::OpConversionPattern;
1345
1346 LogicalResult
1347 matchAndRewrite(comb::ShrUOp op, OpAdaptor adaptor,
1348 ConversionPatternRewriter &rewriter) const override {
1349 auto width = op.getType().getIntOrFloatBitWidth();
1350 auto lhs = adaptor.getLhs();
1351 auto result = createShiftLogic</*isLeftShift=*/false>(
1352 rewriter, op.getLoc(), adaptor.getRhs(), width,
1353 /*getPadding=*/
1354 [&](int64_t index) {
1355 // Don't create zero width value.
1356 if (index == 0)
1357 return Value();
1358 // Padding is 0 for right shift.
1359 return rewriter.createOrFold<hw::ConstantOp>(
1360 op.getLoc(), rewriter.getIntegerType(index), 0);
1361 },
1362 /*getExtract=*/
1363 [&](int64_t index) {
1364 assert(index < width && "index out of bounds");
1365          // Extract the bits from the MSB.
1366 return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, index,
1367 width - index);
1368 });
1369
1370 replaceOpAndCopyNamehint(rewriter, op, result);
1371 return success();
1372 }
1373};
1374
1375struct CombShrSOpConversion : OpConversionPattern<comb::ShrSOp> {
1376  using OpConversionPattern<comb::ShrSOp>::OpConversionPattern;
1377
1378 LogicalResult
1379 matchAndRewrite(comb::ShrSOp op, OpAdaptor adaptor,
1380 ConversionPatternRewriter &rewriter) const override {
1381 auto width = op.getType().getIntOrFloatBitWidth();
1382 if (width == 0)
1383 return rewriter.notifyMatchFailure(op.getLoc(),
1384 "i0 signed shift is unsupported");
1385 auto lhs = adaptor.getLhs();
1386 // Get the sign bit.
1387 auto sign =
1388 rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, width - 1, 1);
1389
1390 // NOTE: The max shift amount is width - 1 because the sign bit is
1391 // already shifted out.
1392 auto result = createShiftLogic</*isLeftShift=*/false>(
1393 rewriter, op.getLoc(), adaptor.getRhs(), width - 1,
1394 /*getPadding=*/
1395 [&](int64_t index) {
1396 return rewriter.createOrFold<comb::ReplicateOp>(op.getLoc(), sign,
1397 index + 1);
1398 },
1399 /*getExtract=*/
1400 [&](int64_t index) {
1401 return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, index,
1402 width - index - 1);
1403 });
1404
1405 replaceOpAndCopyNamehint(rewriter, op, result);
1406 return success();
1407 }
1408};
1409
1410} // namespace
1411
1412//===----------------------------------------------------------------------===//
1413// Convert Comb to Synth pass
1414//===----------------------------------------------------------------------===//
1415
1416namespace {
1417struct ConvertCombToSynthPass
1418 : public impl::ConvertCombToSynthBase<ConvertCombToSynthPass> {
1419 void runOnOperation() override;
1420 using ConvertCombToSynthBase<ConvertCombToSynthPass>::ConvertCombToSynthBase;
1421};
1422} // namespace
1423
1424static void
1425populateCombToAIGConversionPatterns(RewritePatternSet &patterns,
1426                                    uint32_t maxEmulationUnknownBits,
1427 bool lowerToMIG) {
1428 patterns.add<
1429 // Bitwise Logical Ops
1430 CombAndOpConversion, CombXorOpConversion, CombMuxOpConversion,
1431 CombParityOpConversion,
1432 // Arithmetic Ops
1433 CombMulOpConversion, CombICmpOpConversion,
1434 // Shift Ops
1435 CombShlOpConversion, CombShrUOpConversion, CombShrSOpConversion,
1436 // Variadic ops that must be lowered to binary operations
1437 CombLowerVariadicOp<XorOp>, CombLowerVariadicOp<AddOp>,
1438 CombLowerVariadicOp<MulOp>>(patterns.getContext());
1439
1440 patterns.add(comb::convertSubToAdd);
1441
1442 if (lowerToMIG) {
1443 patterns.add<CombOrToMIGConversion, CombLowerVariadicOp<OrOp>,
1444 AndInverterToMIGConversion,
1446 CombAddOpConversion</*useMIG=*/true>>(patterns.getContext());
1447 } else {
1448 patterns.add<CombOrToAIGConversion, CombAddOpConversion</*useMIG=*/false>>(
1449 patterns.getContext());
1450 }
1451
1452 // Add div/mod patterns with a threshold given by the pass option.
1453 patterns.add<CombDivUOpConversion, CombModUOpConversion, CombDivSOpConversion,
1454 CombModSOpConversion>(patterns.getContext(),
1455 maxEmulationUnknownBits);
1456}
1457
1458void ConvertCombToSynthPass::runOnOperation() {
1459 ConversionTarget target(getContext());
1460
1461 // Comb is source dialect.
1462 target.addIllegalDialect<comb::CombDialect>();
1463 // Keep data movement operations like Extract, Concat and Replicate.
1464 target.addLegalOp<comb::ExtractOp, comb::ConcatOp, comb::ReplicateOp,
1465                    hw::BitcastOp>();
1466
1467  // Treat array operations as illegal. Strictly speaking, array operations
1468  // other than an array_get with a non-constant index are legal in AIG, but
1469  // array types prevent a number of optimizations, so lower them to integer
1470  // operations. The HWAggregateToComb pass must be run before this pass.
1471  target.addIllegalOp<hw::ArrayGetOp, hw::ArrayCreateOp, hw::ArrayConcatOp,
1472                      hw::AggregateConstantOp>();
1473
1474 target.addLegalDialect<synth::SynthDialect>();
1475
1476 if (targetIR == CombToSynthTargetIR::AIG) {
1477 // AIG is target dialect.
1478 target.addIllegalOp<synth::mig::MajorityInverterOp>();
1479 } else if (targetIR == CombToSynthTargetIR::MIG) {
1480 target.addIllegalOp<synth::aig::AndInverterOp>();
1481 }
1482
1483 // If additional legal ops are specified, add them to the target.
1484 if (!additionalLegalOps.empty())
1485 for (const auto &opName : additionalLegalOps)
1486 target.addLegalOp(OperationName(opName, &getContext()));
1487
1488 RewritePatternSet patterns(&getContext());
1489 populateCombToAIGConversionPatterns(patterns, maxEmulationUnknownBits,
1490 targetIR == CombToSynthTargetIR::MIG);
1491
1492 if (failed(mlir::applyPartialConversion(getOperation(), target,
1493 std::move(patterns))))
1494 return signalPassFailure();
1495}