CIRCT 23.0.0git
Loading...
Searching...
No Matches
CombToSynth.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the main Comb to Synth Conversion Pass Implementation.
10//
11// High-level Comb Operations
12// |
13// v
14// +-------------------+
15// | and, or, xor, mux |
16// +---------+---------+
17// |
18// +-----+
19// | AIG |
20// +-----+
21//
22//===----------------------------------------------------------------------===//
23
31#include "mlir/Pass/Pass.h"
32#include "mlir/Transforms/DialectConversion.h"
33#include "llvm/ADT/APInt.h"
34#include "llvm/ADT/PointerUnion.h"
35#include "llvm/Support/Debug.h"
36#include <array>
37
38#define DEBUG_TYPE "comb-to-synth"
39
40namespace circt {
41#define GEN_PASS_DEF_CONVERTCOMBTOSYNTH
42#include "circt/Conversion/Passes.h.inc"
43} // namespace circt
44
45using namespace circt;
46using namespace comb;
47
48//===----------------------------------------------------------------------===//
49// Utility Functions
50//===----------------------------------------------------------------------===//
51
52// A wrapper for comb::extractBits that returns a SmallVector<Value>.
53static SmallVector<Value> extractBits(OpBuilder &builder, Value val) {
54 SmallVector<Value> bits;
55 comb::extractBits(builder, val, bits);
56 return bits;
57}
58
59// Construct a mux tree for shift operations. `isLeftShift` controls the
60// direction of the shift operation and is used to determine order of the
61// padding and extracted bits. Callbacks `getPadding` and `getExtract` are used
62// to get the padding and extracted bits for each shift amount. `getPadding`
63// could return a nullptr as i0 value but except for that, these callbacks must
64// return a valid value for each shift amount in the range [0, maxShiftAmount].
65// The value for `maxShiftAmount` is used as the out-of-bounds value.
66template <bool isLeftShift>
67static Value createShiftLogic(ConversionPatternRewriter &rewriter, Location loc,
68 Value shiftAmount, int64_t maxShiftAmount,
69 llvm::function_ref<Value(int64_t)> getPadding,
70 llvm::function_ref<Value(int64_t)> getExtract) {
71 // Extract individual bits from shift amount
72 auto bits = extractBits(rewriter, shiftAmount);
73
74 // Create nodes for each possible shift amount
75 SmallVector<Value> nodes;
76 nodes.reserve(maxShiftAmount);
77 for (int64_t i = 0; i < maxShiftAmount; ++i) {
78 Value extract = getExtract(i);
79 Value padding = getPadding(i);
80
81 if (!padding) {
82 nodes.push_back(extract);
83 continue;
84 }
85
86 // Concatenate extracted bits with padding
87 if (isLeftShift)
88 nodes.push_back(
89 rewriter.createOrFold<comb::ConcatOp>(loc, extract, padding));
90 else
91 nodes.push_back(
92 rewriter.createOrFold<comb::ConcatOp>(loc, padding, extract));
93 }
94
95 // Create out-of-bounds value
96 auto outOfBoundsValue = getPadding(maxShiftAmount);
97 assert(outOfBoundsValue && "outOfBoundsValue must be valid");
98
99 // Construct mux tree for shift operation
100 auto result =
101 comb::constructMuxTree(rewriter, loc, bits, nodes, outOfBoundsValue);
102
103 // Add bounds checking
104 auto inBound = rewriter.createOrFold<comb::ICmpOp>(
105 loc, ICmpPredicate::ult, shiftAmount,
106 hw::ConstantOp::create(rewriter, loc, shiftAmount.getType(),
107 maxShiftAmount));
108
109 return rewriter.createOrFold<comb::MuxOp>(loc, inBound, result,
110 outOfBoundsValue);
111}
112
113// Return a majority function implemented with Comb operations. `carry` has
114// slightly smaller depth than the other inputs.
115static Value createMajorityFunction(OpBuilder &rewriter, Location loc, Value a,
116 Value b, Value carry) {
117 // maj(a, b, c) = (c & (a ^ b)) | (a & b)
118 auto aXnorB = comb::XorOp::create(rewriter, loc, ValueRange{a, b}, true);
119 auto andOp =
120 comb::AndOp::create(rewriter, loc, ValueRange{carry, aXnorB}, true);
121 auto aAndB = comb::AndOp::create(rewriter, loc, ValueRange{a, b}, true);
122 return comb::OrOp::create(rewriter, loc, ValueRange{andOp, aAndB}, true);
123}
124
125static Value extractMSB(OpBuilder &builder, Value val) {
126 return builder.createOrFold<comb::ExtractOp>(
127 val.getLoc(), val, val.getType().getIntOrFloatBitWidth() - 1, 1);
128}
129
130static Value extractOtherThanMSB(OpBuilder &builder, Value val) {
131 return builder.createOrFold<comb::ExtractOp>(
132 val.getLoc(), val, 0, val.getType().getIntOrFloatBitWidth() - 1);
133}
134
namespace {
// A union of Value and IntegerAttr to cleanly handle constant values.
// Used by the unknown-bit emulation below to mix known constant chunks with
// still-symbolic SSA values in one concatenation list.
using ConstantOrValue = llvm::PointerUnion<Value, mlir::IntegerAttr>;
} // namespace
139
140// Return the number of unknown bits and populate the concatenated values.
142 Value value, llvm::SmallVectorImpl<ConstantOrValue> &values) {
143 // Constant or zero width value are all known.
144 if (value.getType().isInteger(0))
145 return 0;
146
147 // Recursively count unknown bits for concat.
148 if (auto concat = value.getDefiningOp<comb::ConcatOp>()) {
149 int64_t totalUnknownBits = 0;
150 for (auto concatInput : llvm::reverse(concat.getInputs())) {
151 auto unknownBits =
152 getNumUnknownBitsAndPopulateValues(concatInput, values);
153 if (unknownBits < 0)
154 return unknownBits;
155 totalUnknownBits += unknownBits;
156 }
157 return totalUnknownBits;
158 }
159
160 // Constant value is known.
161 if (auto constant = value.getDefiningOp<hw::ConstantOp>()) {
162 values.push_back(constant.getValueAttr());
163 return 0;
164 }
165
166 // Consider other operations as unknown bits.
167 // TODO: We can handle replicate, extract, etc.
168 values.push_back(value);
169 return hw::getBitWidth(value.getType());
170}
171
172// Return a value that substitutes the unknown bits with the mask.
173static APInt
175 llvm::SmallVectorImpl<ConstantOrValue> &constantOrValues,
176 uint32_t mask) {
177 uint32_t bitPos = 0, unknownPos = 0;
178 APInt result(width, 0);
179 for (auto constantOrValue : constantOrValues) {
180 int64_t elemWidth;
181 if (auto constant = dyn_cast<IntegerAttr>(constantOrValue)) {
182 elemWidth = constant.getValue().getBitWidth();
183 result.insertBits(constant.getValue(), bitPos);
184 } else {
185 elemWidth = hw::getBitWidth(cast<Value>(constantOrValue).getType());
186 assert(elemWidth >= 0 && "unknown bit width");
187 assert(elemWidth + unknownPos < 32 && "unknown bit width too large");
188 // Create a mask for the unknown bits.
189 uint32_t usedBits = (mask >> unknownPos) & ((1 << elemWidth) - 1);
190 result.insertBits(APInt(elemWidth, usedBits), bitPos);
191 unknownPos += elemWidth;
192 }
193 bitPos += elemWidth;
194 }
195
196 return result;
197}
198
// Emulate a binary operation with unknown bits using a table lookup.
// This function enumerates all possible combinations of unknown bits and
// emulates the operation for each combination. The 2^totalUnknownBits
// precomputed results are then selected at runtime by a mux tree keyed on the
// unknown bits themselves.
static LogicalResult emulateBinaryOpForUnknownBits(
    ConversionPatternRewriter &rewriter, int64_t maxEmulationUnknownBits,
    Operation *op,
    llvm::function_ref<APInt(const APInt &, const APInt &)> emulate) {
  SmallVector<ConstantOrValue> lhsValues, rhsValues;

  assert(op->getNumResults() == 1 && op->getNumOperands() == 2 &&
         "op must be a single result binary operation");

  auto lhs = op->getOperand(0);
  auto rhs = op->getOperand(1);
  auto width = op->getResult(0).getType().getIntOrFloatBitWidth();
  auto loc = op->getLoc();
  auto numLhsUnknownBits = getNumUnknownBitsAndPopulateValues(lhs, lhsValues);
  auto numRhsUnknownBits = getNumUnknownBitsAndPopulateValues(rhs, rhsValues);

  // If unknown bit width is detected, abort the lowering.
  if (numLhsUnknownBits < 0 || numRhsUnknownBits < 0)
    return failure();

  // Table size is 2^totalUnknownBits; refuse to build oversized tables.
  int64_t totalUnknownBits = numLhsUnknownBits + numRhsUnknownBits;
  if (totalUnknownBits > maxEmulationUnknownBits)
    return failure();

  SmallVector<Value> emulatedResults;
  emulatedResults.reserve(1 << totalUnknownBits);

  // Emulate all possible cases.
  // Deduplicate constants so identical results share one hw.constant.
  DenseMap<IntegerAttr, hw::ConstantOp> constantPool;
  auto getConstant = [&](const APInt &value) -> hw::ConstantOp {
    auto attr = rewriter.getIntegerAttr(rewriter.getIntegerType(width), value);
    auto it = constantPool.find(attr);
    if (it != constantPool.end())
      return it->second;
    auto constant = hw::ConstantOp::create(rewriter, loc, value);
    constantPool[attr] = constant;
    return constant;
  };

  // Enumerate every assignment of the unknown bits (lhs outer, rhs inner) and
  // compute the operation's result for that assignment.
  for (uint32_t lhsMask = 0, lhsMaskEnd = 1 << numLhsUnknownBits;
       lhsMask < lhsMaskEnd; ++lhsMask) {
    APInt lhsValue = substitueMaskToValues(width, lhsValues, lhsMask);
    for (uint32_t rhsMask = 0, rhsMaskEnd = 1 << numRhsUnknownBits;
         rhsMask < rhsMaskEnd; ++rhsMask) {
      APInt rhsValue = substitueMaskToValues(width, rhsValues, rhsMask);
      // Emulate.
      emulatedResults.push_back(getConstant(emulate(lhsValue, rhsValue)));
    }
  }

  // Create selectors for mux tree.
  // NOTE(review): rhs bits are collected first so the selector order matches
  // the (lhs outer, rhs inner) enumeration order above — rhs varies fastest.
  SmallVector<Value> selectors;
  selectors.reserve(totalUnknownBits);
  for (auto &concatedValues : {rhsValues, lhsValues})
    for (auto valueOrConstant : concatedValues) {
      auto value = dyn_cast<Value>(valueOrConstant);
      if (!value)
        continue;
      extractBits(rewriter, value, selectors);
    }

  assert(totalUnknownBits == static_cast<int64_t>(selectors.size()) &&
         "number of selectors must match");
  auto muxed = constructMuxTree(rewriter, loc, selectors, emulatedResults,
                                getConstant(APInt::getZero(width)));

  replaceOpAndCopyNamehint(rewriter, op, muxed);
  return success();
}
271
272//===----------------------------------------------------------------------===//
273// Conversion patterns
274//===----------------------------------------------------------------------===//
275
276namespace {
277
278/// Lower a comb::AndOp operation to synth::aig::AndInverterOp
279struct CombAndOpConversion : OpConversionPattern<AndOp> {
281
282 LogicalResult
283 matchAndRewrite(AndOp op, OpAdaptor adaptor,
284 ConversionPatternRewriter &rewriter) const override {
285 SmallVector<bool> nonInverts(adaptor.getInputs().size(), false);
286 replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
287 rewriter, op, adaptor.getInputs(), nonInverts);
288 return success();
289 }
290};
291
292/// Lower a comb::OrOp operation to synth::aig::AndInverterOp with invert flags
293struct CombOrToAIGConversion : OpConversionPattern<OrOp> {
295
296 LogicalResult
297 matchAndRewrite(OrOp op, OpAdaptor adaptor,
298 ConversionPatternRewriter &rewriter) const override {
299 // Implement Or using And and invert flags: a | b = ~(~a & ~b)
300 SmallVector<bool> allInverts(adaptor.getInputs().size(), true);
301 auto andOp = synth::aig::AndInverterOp::create(
302 rewriter, op.getLoc(), adaptor.getInputs(), allInverts);
303 replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
304 rewriter, op, andOp,
305 /*invert=*/true);
306 return success();
307 }
308};
309
310struct CombXorOpToSynthConversion : OpConversionPattern<XorOp> {
312
313 LogicalResult
314 matchAndRewrite(XorOp op, OpAdaptor adaptor,
315 ConversionPatternRewriter &rewriter) const override {
316 SmallVector<bool> inverted(adaptor.getInputs().size(), false);
317 replaceOpWithNewOpAndCopyNamehint<synth::XorInverterOp>(
318 rewriter, op, adaptor.getInputs(), inverted);
319 return success();
320 }
321};
322
323/// Lower a synth::XorOp operation to AIG operations
324struct SynthXorInverterOpConversion
325 : OpConversionPattern<synth::XorInverterOp> {
326 using OpConversionPattern<synth::XorInverterOp>::OpConversionPattern;
327
328 LogicalResult
329 matchAndRewrite(synth::XorInverterOp op, OpAdaptor adaptor,
330 ConversionPatternRewriter &rewriter) const override {
331 if (op.getNumOperands() != 2)
332 return failure();
333 // Xor using And with invert flags: a ^ b = (a | b) & (~a | ~b)
334
335 // (a | b) = ~(~a & ~b)
336 // (~a | ~b) = ~(a & b)
337 auto inputs = adaptor.getInputs();
338 auto allNotInverts = op.getInverted();
339 std::array<bool, 2> allInverts = {!allNotInverts[0], !allNotInverts[1]};
340
341 auto notAAndNotB = synth::aig::AndInverterOp::create(rewriter, op.getLoc(),
342 inputs, allInverts);
343 auto aAndB = synth::aig::AndInverterOp::create(rewriter, op.getLoc(),
344 inputs, allNotInverts);
345
346 replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
347 rewriter, op, notAAndNotB, aAndB,
348 /*lhs_invert=*/true,
349 /*rhs_invert=*/true);
350 return success();
351 }
352};
353
354template <typename OpTy>
355struct CombLowerVariadicOp : OpConversionPattern<OpTy> {
357 using OpAdaptor = typename OpConversionPattern<OpTy>::OpAdaptor;
358 LogicalResult
359 matchAndRewrite(OpTy op, OpAdaptor adaptor,
360 ConversionPatternRewriter &rewriter) const override {
361 auto result = lowerFullyAssociativeOp(op, op.getOperands(), rewriter);
362 replaceOpAndCopyNamehint(rewriter, op, result);
363 return success();
364 }
365
366 static Value lowerFullyAssociativeOp(OpTy op, OperandRange operands,
367 ConversionPatternRewriter &rewriter) {
368 Value lhs, rhs;
369 switch (operands.size()) {
370 case 0:
371 llvm_unreachable("cannot be called with empty operand range");
372 break;
373 case 1:
374 return operands[0];
375 case 2:
376 lhs = operands[0];
377 rhs = operands[1];
378 return OpTy::create(rewriter, op.getLoc(), ValueRange{lhs, rhs}, true);
379 default:
380 auto firstHalf = operands.size() / 2;
381 lhs =
382 lowerFullyAssociativeOp(op, operands.take_front(firstHalf), rewriter);
383 rhs =
384 lowerFullyAssociativeOp(op, operands.drop_front(firstHalf), rewriter);
385 return OpTy::create(rewriter, op.getLoc(), ValueRange{lhs, rhs}, true);
386 }
387 }
388};
389
390// Lower comb::MuxOp to AIG operations.
391struct CombMuxOpConversion : OpConversionPattern<MuxOp> {
393
394 LogicalResult
395 matchAndRewrite(MuxOp op, OpAdaptor adaptor,
396 ConversionPatternRewriter &rewriter) const override {
397 Value cond = op.getCond();
398 auto trueVal = op.getTrueValue();
399 auto falseVal = op.getFalseValue();
400
401 if (!op.getType().isInteger()) {
402 // If the type of the mux is not integer, bitcast the operands first.
403 auto widthType = rewriter.getIntegerType(hw::getBitWidth(op.getType()));
404 trueVal =
405 hw::BitcastOp::create(rewriter, op->getLoc(), widthType, trueVal);
406 falseVal =
407 hw::BitcastOp::create(rewriter, op->getLoc(), widthType, falseVal);
408 }
409
410 // Replicate condition if needed
411 if (!trueVal.getType().isInteger(1))
412 cond = comb::ReplicateOp::create(rewriter, op.getLoc(), trueVal.getType(),
413 cond);
414
415 // c ? a : b => (replicate(c) & a) | (~replicate(c) & b)
416 auto lhs =
417 synth::aig::AndInverterOp::create(rewriter, op.getLoc(), cond, trueVal);
418 auto rhs = synth::aig::AndInverterOp::create(rewriter, op.getLoc(), cond,
419 falseVal, true, false);
420
421 Value result = comb::OrOp::create(rewriter, op.getLoc(), lhs, rhs);
422 // Insert the bitcast if the type of the mux is not integer.
423 if (result.getType() != op.getType())
424 result =
425 hw::BitcastOp::create(rewriter, op.getLoc(), op.getType(), result);
426 replaceOpAndCopyNamehint(rewriter, op, result);
427 return success();
428 }
429};
430
431//===----------------------------------------------------------------------===//
432// Adder Architecture Selection
433//===----------------------------------------------------------------------===//
434
// Supported adder implementations, from serial to increasingly parallel.
enum AdderArchitecture { RippleCarry, Sklanskey, KoggeStone, BrentKung };
// Select an adder architecture for a `width`-bit add. A test-only attribute
// overrides the width-based heuristic when present.
AdderArchitecture determineAdderArch(Operation *op, int64_t width) {
  auto strAttr = op->getAttrOfType<StringAttr>("synth.test.arch");
  if (strAttr) {
    // NOTE(review): this StringSwitch has no .Default — an unrecognized
    // attribute value falls through to StringSwitch's no-match behavior;
    // confirm test inputs are always one of the four strings below.
    return llvm::StringSwitch<AdderArchitecture>(strAttr.getValue())
        .Case("SKLANSKEY", Sklanskey)
        .Case("KOGGE-STONE", KoggeStone)
        .Case("BRENT-KUNG", BrentKung)
        .Case("RIPPLE-CARRY", RippleCarry);
  }
  // Determine using width as a heuristic.
  // TODO: Perform a more thorough analysis to motivate the choices or
  // implement an adder synthesis algorithm to construct an optimal adder
  // under the given timing constraints - see the work of Zimmermann

  // For very small adders, overhead of a parallel prefix adder is likely not
  // worth it.
  if (width < 8)
    return AdderArchitecture::RippleCarry;

  // Sklanskey is a good compromise for high-performance, but has high fanout
  // which may lead to wiring congestion for very large adders.
  if (width <= 32)
    return AdderArchitecture::Sklanskey;

  // Kogge-Stone uses greater area than Sklanskey but has lower fanout thus
  // may be preferable for larger adders.
  return AdderArchitecture::KoggeStone;
}
464
465//===----------------------------------------------------------------------===//
466// Parallel Prefix Tree
467//===----------------------------------------------------------------------===//
468
// Implement the Kogge-Stone parallel prefix tree
// Described in https://en.wikipedia.org/wiki/Kogge%E2%80%93Stone_adder
// Slightly better delay than Brent-Kung, but more area.
// On return, gPrefix[i] holds the group-generate covering bits [0, i] and
// pPrefix[i] the corresponding group-propagate.
void lowerKoggeStonePrefixTree(OpBuilder &builder, Location loc,
                               SmallVector<Value> &pPrefix,
                               SmallVector<Value> &gPrefix) {

  auto width = static_cast<int64_t>(pPrefix.size());
  assert(width == static_cast<int64_t>(gPrefix.size()));
  // Each stage writes into the "New" copies so that every combine in a stage
  // reads only values from the previous stage (stage ordering matters).
  SmallVector<Value> pPrefixNew = pPrefix;
  SmallVector<Value> gPrefixNew = gPrefix;

  // Kogge-Stone parallel prefix computation
  for (int64_t stride = 1; stride < width; stride *= 2) {

    for (int64_t i = stride; i < width; ++i) {
      int64_t j = i - stride;

      // Group generate: g_i OR (p_i AND g_j)
      Value andPG = comb::AndOp::create(builder, loc, pPrefix[i], gPrefix[j]);
      gPrefixNew[i] = comb::OrOp::create(builder, loc, gPrefix[i], andPG);

      // Group propagate: p_i AND p_j
      pPrefixNew[i] = comb::AndOp::create(builder, loc, pPrefix[i], pPrefix[j]);
    }

    // Commit this stage's results before the stride doubles.
    pPrefix = pPrefixNew;
    gPrefix = gPrefixNew;
  }

  // Debug-only trace: replays the stage structure symbolically (no IR built).
  LLVM_DEBUG({
    int64_t stage = 0;
    for (int64_t stride = 1; stride < width; stride *= 2) {
      llvm::dbgs()
          << "--------------------------------------- Kogge-Stone Stage "
          << stage << "\n";
      for (int64_t i = stride; i < width; ++i) {
        int64_t j = i - stride;
        // Group generate: g_i OR (p_i AND g_j)
        llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
                     << " OR (P" << i << stage << " AND G" << j << stage
                     << ")\n";

        // Group propagate: p_i AND p_j
        llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
                     << " AND P" << j << stage << "\n";
      }
      ++stage;
    }
  });
}
520
// Implement the Sklansky parallel prefix tree
// High fan-out, low depth, low area
// On return, gPrefix[i] holds the group-generate covering bits [0, i] and
// pPrefix[i] the corresponding group-propagate.
void lowerSklanskeyPrefixTree(OpBuilder &builder, Location loc,
                              SmallVector<Value> &pPrefix,
                              SmallVector<Value> &gPrefix) {
  auto width = static_cast<int64_t>(pPrefix.size());
  assert(width == static_cast<int64_t>(gPrefix.size()));
  // Each stage writes into the "New" copies so that every combine in a stage
  // reads only values from the previous stage.
  SmallVector<Value> pPrefixNew = pPrefix;
  SmallVector<Value> gPrefixNew = gPrefix;
  for (int64_t stride = 1; stride < width; stride *= 2) {
    // Sklansky: each block of `stride` bits combines with the last bit (j) of
    // the preceding block, giving j high fan-out but minimal depth.
    for (int64_t i = stride; i < width; i += 2 * stride) {
      for (int64_t k = 0; k < stride && i + k < width; ++k) {
        int64_t idx = i + k;
        int64_t j = i - 1;

        // Group generate: g_idx OR (p_idx AND g_j)
        Value andPG =
            comb::AndOp::create(builder, loc, pPrefix[idx], gPrefix[j]);
        gPrefixNew[idx] = comb::OrOp::create(builder, loc, gPrefix[idx], andPG);

        // Group propagate: p_idx AND p_j
        pPrefixNew[idx] =
            comb::AndOp::create(builder, loc, pPrefix[idx], pPrefix[j]);
      }
    }

    // Commit this stage's results before the stride doubles.
    pPrefix = pPrefixNew;
    gPrefix = gPrefixNew;
  }

  // Debug-only trace: replays the stage structure symbolically (no IR built).
  LLVM_DEBUG({
    int64_t stage = 0;
    for (int64_t stride = 1; stride < width; stride *= 2) {
      llvm::dbgs() << "--------------------------------------- Sklanskey Stage "
                   << stage << "\n";
      for (int64_t i = stride; i < width; i += 2 * stride) {
        for (int64_t k = 0; k < stride && i + k < width; ++k) {
          int64_t idx = i + k;
          int64_t j = i - 1;
          // Group generate: g_i OR (p_i AND g_j)
          llvm::dbgs() << "G" << idx << stage + 1 << " = G" << idx << stage
                       << " OR (P" << idx << stage << " AND G" << j << stage
                       << ")\n";

          // Group propagate: p_i AND p_j
          llvm::dbgs() << "P" << idx << stage + 1 << " = P" << idx << stage
                       << " AND P" << j << stage << "\n";
        }
      }
      ++stage;
    }
  });
}
574
// Implement the Brent-Kung parallel prefix tree
// Described in https://en.wikipedia.org/wiki/Brent%E2%80%93Kung_adder
// Slightly worse delay than Kogge-Stone, but less area.
// On return, gPrefix[i] holds the group-generate covering bits [0, i] and
// pPrefix[i] the corresponding group-propagate.
void lowerBrentKungPrefixTree(OpBuilder &builder, Location loc,
                              SmallVector<Value> &pPrefix,
                              SmallVector<Value> &gPrefix) {
  auto width = static_cast<int64_t>(pPrefix.size());
  assert(width == static_cast<int64_t>(gPrefix.size()));
  // Each stage writes into the "New" copies so that every combine in a stage
  // reads only values from the previous stage.
  SmallVector<Value> pPrefixNew = pPrefix;
  SmallVector<Value> gPrefixNew = gPrefix;
  // Brent-Kung parallel prefix computation
  // Forward phase: build a reduction tree over power-of-two boundaries.
  // `stride` is declared outside the loop so the backward phase can continue
  // from the final forward value.
  int64_t stride;
  for (stride = 1; stride < width; stride *= 2) {
    for (int64_t i = stride * 2 - 1; i < width; i += stride * 2) {
      int64_t j = i - stride;

      // Group generate: g_i OR (p_i AND g_j)
      Value andPG = comb::AndOp::create(builder, loc, pPrefix[i], gPrefix[j]);
      gPrefixNew[i] = comb::OrOp::create(builder, loc, gPrefix[i], andPG);

      // Group propagate: p_i AND p_j
      pPrefixNew[i] = comb::AndOp::create(builder, loc, pPrefix[i], pPrefix[j]);
    }
    pPrefix = pPrefixNew;
    gPrefix = gPrefixNew;
  }

  // Backward phase: fill in the intermediate positions the forward phase
  // skipped, halving the stride each step.
  for (; stride > 0; stride /= 2) {
    for (int64_t i = stride * 3 - 1; i < width; i += stride * 2) {
      int64_t j = i - stride;

      // Group generate: g_i OR (p_i AND g_j)
      Value andPG = comb::AndOp::create(builder, loc, pPrefix[i], gPrefix[j]);
      gPrefixNew[i] = comb::OrOp::create(builder, loc, gPrefix[i], andPG);

      // Group propagate: p_i AND p_j
      pPrefixNew[i] = comb::AndOp::create(builder, loc, pPrefix[i], pPrefix[j]);
    }
    pPrefix = pPrefixNew;
    gPrefix = gPrefixNew;
  }

  // Debug-only trace: replays both phases symbolically (no IR built).
  LLVM_DEBUG({
    int64_t stage = 0;
    for (stride = 1; stride < width; stride *= 2) {
      llvm::dbgs() << "--------------------------------------- Brent-Kung FW "
                   << stage << " : Stride " << stride << "\n";
      for (int64_t i = stride * 2 - 1; i < width; i += stride * 2) {
        int64_t j = i - stride;

        // Group generate: g_i OR (p_i AND g_j)
        llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
                     << " OR (P" << i << stage << " AND G" << j << stage
                     << ")\n";

        // Group propagate: p_i AND p_j
        llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
                     << " AND P" << j << stage << "\n";
      }
      ++stage;
    }

    for (; stride > 0; stride /= 2) {
      if (stride * 3 - 1 < width)
        llvm::dbgs() << "--------------------------------------- Brent-Kung BW "
                     << stage << " : Stride " << stride << "\n";

      for (int64_t i = stride * 3 - 1; i < width; i += stride * 2) {
        int64_t j = i - stride;

        // Group generate: g_i OR (p_i AND g_j)
        llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
                     << " OR (P" << i << stage << " AND G" << j << stage
                     << ")\n";

        // Group propagate: p_i AND p_j
        llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
                     << " AND P" << j << stage << "\n";
      }
      --stage;
    }
  });
}
660
// TODO: Generalize to other parallel prefix trees.
// Lazily-evaluated Kogge-Stone prefix tree: only the (level, bit) cells that
// are actually queried via getFinal() are materialized as IR, with results
// memoized in `prefixCache`.
class LazyKoggeStonePrefixTree {
public:
  // Seed level 0 of the cache with the per-bit propagate/generate inputs.
  // `pPrefix` and `gPrefix` must each hold at least `width` values.
  LazyKoggeStonePrefixTree(OpBuilder &builder, Location loc, int64_t width,
                           ArrayRef<Value> pPrefix, ArrayRef<Value> gPrefix)
      : builder(builder), loc(loc), width(width) {
    assert(width > 0 && "width must be positive");
    for (int64_t i = 0; i < width; ++i)
      prefixCache[{0, i}] = {pPrefix[i], gPrefix[i]};
  }

  // Get the final group and propagate values for bit i.
  std::pair<Value, Value> getFinal(int64_t i) {
    assert(i >= 0 && i < width && "i out of bounds");
    // Final level is ceil(log2(width)) in Kogge-Stone.
    return getGroupAndPropagate(llvm::Log2_64_Ceil(width), i);
  }

private:
  // Recursively get the group and propagate values for bit i at level `level`.
  // Level 0 is the initial level with the input propagate and generate values.
  // Level n computes the group and propagate values for a stride of 2^(n-1).
  // Uses memoization to cache intermediate results.
  std::pair<Value, Value> getGroupAndPropagate(int64_t level, int64_t i);
  OpBuilder &builder;
  Location loc;
  int64_t width;
  // Keyed by (level, bit index); values are (propagate, generate) pairs.
  DenseMap<std::pair<int64_t, int64_t>, std::pair<Value, Value>> prefixCache;
};
690
691std::pair<Value, Value>
692LazyKoggeStonePrefixTree::getGroupAndPropagate(int64_t level, int64_t i) {
693 assert(i < width && "i out of bounds");
694 auto key = std::make_pair(level, i);
695 auto it = prefixCache.find(key);
696 if (it != prefixCache.end())
697 return it->second;
698
699 assert(level > 0 && "If the level is 0, we should have hit the cache");
700
701 int64_t previousStride = 1ULL << (level - 1);
702 if (i < previousStride) {
703 // No dependency, just copy from the previous level.
704 auto [propagateI, generateI] = getGroupAndPropagate(level - 1, i);
705 prefixCache[key] = {propagateI, generateI};
706 return prefixCache[key];
707 }
708 // Get the dependency index.
709 int64_t j = i - previousStride;
710 auto [propagateI, generateI] = getGroupAndPropagate(level - 1, i);
711 auto [propagateJ, generateJ] = getGroupAndPropagate(level - 1, j);
712 // Group generate: g_i OR (p_i AND g_j)
713 Value andPG = comb::AndOp::create(builder, loc, propagateI, generateJ);
714 Value newGenerate = comb::OrOp::create(builder, loc, generateI, andPG);
715 // Group propagate: p_i AND p_j
716 Value newPropagate =
717 comb::AndOp::create(builder, loc, propagateI, propagateJ);
718 prefixCache[key] = {newPropagate, newGenerate};
719 return prefixCache[key];
720}
721
722struct CombAddOpConversion : OpConversionPattern<AddOp> {
724
725 LogicalResult
726 matchAndRewrite(AddOp op, OpAdaptor adaptor,
727 ConversionPatternRewriter &rewriter) const override {
728 auto inputs = adaptor.getInputs();
729 // Lower only when there are two inputs.
730 // Variadic operands must be lowered in a different pattern.
731 if (inputs.size() != 2)
732 return failure();
733
734 auto width = op.getType().getIntOrFloatBitWidth();
735 // Skip a zero width value.
736 if (width == 0) {
737 replaceOpWithNewOpAndCopyNamehint<hw::ConstantOp>(rewriter, op,
738 op.getType(), 0);
739 return success();
740 }
741
742 // Check if the architecture is specified by an attribute.
743 auto arch = determineAdderArch(op, width);
744 if (arch == AdderArchitecture::RippleCarry)
745 return lowerRippleCarryAdder(op, inputs, rewriter);
746 return lowerParallelPrefixAdder(op, inputs, rewriter);
747 }
748
749 // Implement a basic ripple-carry adder for small bitwidths.
750 LogicalResult
751 lowerRippleCarryAdder(comb::AddOp op, ValueRange inputs,
752 ConversionPatternRewriter &rewriter) const {
753 auto width = op.getType().getIntOrFloatBitWidth();
754 // Implement a naive Ripple-carry full adder.
755 Value carry;
756
757 auto aBits = extractBits(rewriter, inputs[0]);
758 auto bBits = extractBits(rewriter, inputs[1]);
759 SmallVector<Value> results;
760 results.resize(width);
761 for (int64_t i = 0; i < width; ++i) {
762 SmallVector<Value> xorOperands = {aBits[i], bBits[i]};
763 if (carry)
764 xorOperands.push_back(carry);
765
766 // sum[i] = xor(carry[i-1], a[i], b[i])
767 // NOTE: The result is stored in reverse order.
768 results[width - i - 1] =
769 comb::XorOp::create(rewriter, op.getLoc(), xorOperands, true);
770
771 // If this is the last bit, we are done.
772 if (i == width - 1)
773 break;
774
775 // carry[i] = (carry[i-1] & (a[i] ^ b[i])) | (a[i] & b[i])
776 if (!carry) {
777 // This is the first bit, so the carry is the next carry.
778 carry = comb::AndOp::create(rewriter, op.getLoc(),
779 ValueRange{aBits[i], bBits[i]}, true);
780 continue;
781 }
782
783 carry = createMajorityFunction(rewriter, op.getLoc(), aBits[i], bBits[i],
784 carry);
785 }
786 LLVM_DEBUG(llvm::dbgs() << "Lower comb.add to Ripple-Carry Adder of width "
787 << width << "\n");
788
789 replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(rewriter, op, results);
790 return success();
791 }
792
793 // Implement a parallel prefix adder - with Kogge-Stone or Brent-Kung trees
794 // Will introduce unused signals for the carry bits but these will be removed
795 // by the AIG pass.
796 LogicalResult
797 lowerParallelPrefixAdder(comb::AddOp op, ValueRange inputs,
798 ConversionPatternRewriter &rewriter) const {
799 auto width = op.getType().getIntOrFloatBitWidth();
800
801 auto aBits = extractBits(rewriter, inputs[0]);
802 auto bBits = extractBits(rewriter, inputs[1]);
803
804 // Construct propagate (p) and generate (g) signals
805 SmallVector<Value> p, g;
806 p.reserve(width);
807 g.reserve(width);
808
809 for (auto [aBit, bBit] : llvm::zip(aBits, bBits)) {
810 // p_i = a_i XOR b_i
811 p.push_back(comb::XorOp::create(rewriter, op.getLoc(), aBit, bBit));
812 // g_i = a_i AND b_i
813 g.push_back(comb::AndOp::create(rewriter, op.getLoc(), aBit, bBit));
814 }
815
816 LLVM_DEBUG({
817 llvm::dbgs() << "Lower comb.add to Parallel-Prefix of width " << width
818 << "\n--------------------------------------- Init\n";
819
820 for (int64_t i = 0; i < width; ++i) {
821 // p_i = a_i XOR b_i
822 llvm::dbgs() << "P0" << i << " = A" << i << " XOR B" << i << "\n";
823 // g_i = a_i AND b_i
824 llvm::dbgs() << "G0" << i << " = A" << i << " AND B" << i << "\n";
825 }
826 });
827
828 // Create copies of p and g for the prefix computation
829 SmallVector<Value> pPrefix = p;
830 SmallVector<Value> gPrefix = g;
831
832 // Check if the architecture is specified by an attribute.
833 auto arch = determineAdderArch(op, width);
834
835 switch (arch) {
836 case AdderArchitecture::RippleCarry:
837 llvm_unreachable("Ripple-Carry should be handled separately");
838 break;
839 case AdderArchitecture::Sklanskey:
840 lowerSklanskeyPrefixTree(rewriter, op.getLoc(), pPrefix, gPrefix);
841 break;
842 case AdderArchitecture::KoggeStone:
843 lowerKoggeStonePrefixTree(rewriter, op.getLoc(), pPrefix, gPrefix);
844 break;
845 case AdderArchitecture::BrentKung:
846 lowerBrentKungPrefixTree(rewriter, op.getLoc(), pPrefix, gPrefix);
847 break;
848 }
849
850 // Generate result sum bits
851 // NOTE: The result is stored in reverse order.
852 SmallVector<Value> results;
853 results.resize(width);
854 // Sum bit 0 is just p[0] since carry_in = 0
855 results[width - 1] = p[0];
856
857 // For remaining bits, sum_i = p_i XOR g_(i-1)
858 // The carry into position i is the group generate from position i-1
859 for (int64_t i = 1; i < width; ++i)
860 results[width - 1 - i] =
861 comb::XorOp::create(rewriter, op.getLoc(), p[i], gPrefix[i - 1]);
862
863 replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(rewriter, op, results);
864
865 LLVM_DEBUG({
866 llvm::dbgs() << "--------------------------------------- Completion\n"
867 << "RES0 = P0\n";
868 for (int64_t i = 1; i < width; ++i)
869 llvm::dbgs() << "RES" << i << " = P" << i << " XOR G" << i - 1 << "\n";
870 });
871
872 return success();
873 }
874};
875
876struct CombMulOpConversion : OpConversionPattern<MulOp> {
878 using OpAdaptor = typename OpConversionPattern<MulOp>::OpAdaptor;
879 LogicalResult
880 matchAndRewrite(MulOp op, OpAdaptor adaptor,
881 ConversionPatternRewriter &rewriter) const override {
882 if (adaptor.getInputs().size() != 2)
883 return failure();
884
885 Location loc = op.getLoc();
886 Value a = adaptor.getInputs()[0];
887 Value b = adaptor.getInputs()[1];
888 unsigned width = op.getType().getIntOrFloatBitWidth();
889
890 // Skip a zero width value.
891 if (width == 0) {
892 rewriter.replaceOpWithNewOp<hw::ConstantOp>(op, op.getType(), 0);
893 return success();
894 }
895
896 // Extract individual bits from operands
897 SmallVector<Value> aBits = extractBits(rewriter, a);
898 SmallVector<Value> bBits = extractBits(rewriter, b);
899
900 auto falseValue = hw::ConstantOp::create(rewriter, loc, APInt(1, 0));
901
902 // Generate partial products
903 SmallVector<SmallVector<Value>> partialProducts;
904 partialProducts.reserve(width);
905 for (unsigned i = 0; i < width; ++i) {
906 SmallVector<Value> row(i, falseValue);
907 row.reserve(width);
908 // Generate partial product bits
909 for (unsigned j = 0; i + j < width; ++j)
910 row.push_back(
911 rewriter.createOrFold<comb::AndOp>(loc, aBits[j], bBits[i]));
912
913 partialProducts.push_back(row);
914 }
915
916 // If the width is 1, we are done.
917 if (width == 1) {
918 rewriter.replaceOp(op, partialProducts[0][0]);
919 return success();
920 }
921
922 // Wallace tree reduction - reduce to two addends.
923 datapath::CompressorTree comp(width, partialProducts, loc);
924 auto addends = comp.compressToHeight(rewriter, 2);
925
926 // Sum the two addends using a carry-propagate adder
927 auto newAdd = comb::AddOp::create(rewriter, loc, addends, true);
928 replaceOpAndCopyNamehint(rewriter, op, newAdd);
929 return success();
930 }
931};
932
933template <typename OpTy>
934struct DivModOpConversionBase : OpConversionPattern<OpTy> {
935 DivModOpConversionBase(MLIRContext *context, int64_t maxEmulationUnknownBits)
937 maxEmulationUnknownBits(maxEmulationUnknownBits) {
938 assert(maxEmulationUnknownBits < 32 &&
939 "maxEmulationUnknownBits must be less than 32");
940 }
941 const int64_t maxEmulationUnknownBits;
942};
943
944struct CombDivUOpConversion : DivModOpConversionBase<DivUOp> {
945 using DivModOpConversionBase<DivUOp>::DivModOpConversionBase;
946 LogicalResult
947 matchAndRewrite(DivUOp op, OpAdaptor adaptor,
948 ConversionPatternRewriter &rewriter) const override {
949 // Check if the divisor is a power of two.
950 if (llvm::succeeded(comb::convertDivUByPowerOfTwo(op, rewriter)))
951 return success();
952
953 // When rhs is not power of two and the number of unknown bits are small,
954 // create a mux tree that emulates all possible cases.
956 rewriter, maxEmulationUnknownBits, op,
957 [](const APInt &lhs, const APInt &rhs) {
958 // Division by zero is undefined, just return zero.
959 if (rhs.isZero())
960 return APInt::getZero(rhs.getBitWidth());
961 return lhs.udiv(rhs);
962 });
963 }
964};
965
966struct CombModUOpConversion : DivModOpConversionBase<ModUOp> {
967 using DivModOpConversionBase<ModUOp>::DivModOpConversionBase;
968 LogicalResult
969 matchAndRewrite(ModUOp op, OpAdaptor adaptor,
970 ConversionPatternRewriter &rewriter) const override {
971 // Check if the divisor is a power of two.
972 if (llvm::succeeded(comb::convertModUByPowerOfTwo(op, rewriter)))
973 return success();
974
975 // When rhs is not power of two and the number of unknown bits are small,
976 // create a mux tree that emulates all possible cases.
978 rewriter, maxEmulationUnknownBits, op,
979 [](const APInt &lhs, const APInt &rhs) {
980 // Division by zero is undefined, just return zero.
981 if (rhs.isZero())
982 return APInt::getZero(rhs.getBitWidth());
983 return lhs.urem(rhs);
984 });
985 }
986};
987
988struct CombDivSOpConversion : DivModOpConversionBase<DivSOp> {
989 using DivModOpConversionBase<DivSOp>::DivModOpConversionBase;
990
991 LogicalResult
992 matchAndRewrite(DivSOp op, OpAdaptor adaptor,
993 ConversionPatternRewriter &rewriter) const override {
994 // Currently only lower with emulation.
995 // TODO: Implement a signed division lowering at least for power of two.
997 rewriter, maxEmulationUnknownBits, op,
998 [](const APInt &lhs, const APInt &rhs) {
999 // Division by zero is undefined, just return zero.
1000 if (rhs.isZero())
1001 return APInt::getZero(rhs.getBitWidth());
1002 return lhs.sdiv(rhs);
1003 });
1004 }
1005};
1006
1007struct CombModSOpConversion : DivModOpConversionBase<ModSOp> {
1008 using DivModOpConversionBase<ModSOp>::DivModOpConversionBase;
1009 LogicalResult
1010 matchAndRewrite(ModSOp op, OpAdaptor adaptor,
1011 ConversionPatternRewriter &rewriter) const override {
1012 // Currently only lower with emulation.
1013 // TODO: Implement a signed modulus lowering at least for power of two.
1015 rewriter, maxEmulationUnknownBits, op,
1016 [](const APInt &lhs, const APInt &rhs) {
1017 // Division by zero is undefined, just return zero.
1018 if (rhs.isZero())
1019 return APInt::getZero(rhs.getBitWidth());
1020 return lhs.srem(rhs);
1021 });
1022 }
1023};
1024
1025struct CombICmpOpConversion : OpConversionPattern<ICmpOp> {
1027
1028 // Simple comparator for small bit widths
1029 static Value constructRippleCarry(Location loc, Value a, Value b,
1030 bool includeEq,
1031 ConversionPatternRewriter &rewriter) {
1032 // Construct following unsigned comparison expressions.
1033 // a <= b ==> (~a[n] & b[n]) | (a[n] == b[n] & a[n-1:0] <= b[n-1:0])
1034 // a < b ==> (~a[n] & b[n]) | (a[n] == b[n] & a[n-1:0] < b[n-1:0])
1035 auto aBits = extractBits(rewriter, a);
1036 auto bBits = extractBits(rewriter, b);
1037 Value acc = hw::ConstantOp::create(rewriter, loc, APInt(1, includeEq));
1038
1039 for (auto [aBit, bBit] : llvm::zip(aBits, bBits)) {
1040 auto aBitXorBBit =
1041 rewriter.createOrFold<comb::XorOp>(loc, aBit, bBit, true);
1042 auto aEqualB = rewriter.createOrFold<synth::aig::AndInverterOp>(
1043 loc, aBitXorBBit, true);
1044 auto pred = rewriter.createOrFold<synth::aig::AndInverterOp>(
1045 loc, aBit, bBit, true, false);
1046
1047 auto aBitAndBBit = rewriter.createOrFold<comb::AndOp>(
1048 loc, ValueRange{aEqualB, acc}, true);
1049 acc = rewriter.createOrFold<comb::OrOp>(loc, pred, aBitAndBBit, true);
1050 }
1051 return acc;
1052 }
1053
1054 // Compute prefix comparison using parallel prefix algorithm
1055 // Note: This generates all intermediate prefix values even though we only
1056 // need the final result. Optimizing this to skip intermediate computations
1057 // is non-trivial because each iteration depends on results from previous
1058 // iterations. We rely on DCE passes to remove unused operations.
1059 // TODO: Lazily compute only the required prefix values. Kogge-Stone is
1060 // already implemented in a lazy manner below, but other architectures can
1061 // also be optimized.
1062 static Value computePrefixComparison(ConversionPatternRewriter &rewriter,
1063 Location loc, SmallVector<Value> pPrefix,
1064 SmallVector<Value> gPrefix,
1065 bool includeEq, AdderArchitecture arch) {
1066 auto width = pPrefix.size();
1067 Value finalGroup, finalPropagate;
1068 // Apply the appropriate prefix tree algorithm
1069 switch (arch) {
1070 case AdderArchitecture::RippleCarry:
1071 llvm_unreachable("Ripple-Carry should be handled separately");
1072 break;
1073 case AdderArchitecture::Sklanskey: {
1074 lowerSklanskeyPrefixTree(rewriter, loc, pPrefix, gPrefix);
1075 finalGroup = gPrefix[width - 1];
1076 finalPropagate = pPrefix[width - 1];
1077 break;
1078 }
1079 case AdderArchitecture::KoggeStone:
1080 // Use lazy Kogge-Stone implementation to avoid computing all
1081 // intermediate prefix values.
1082 std::tie(finalPropagate, finalGroup) =
1083 LazyKoggeStonePrefixTree(rewriter, loc, width, pPrefix, gPrefix)
1084 .getFinal(width - 1);
1085 break;
1086 case AdderArchitecture::BrentKung: {
1087 lowerBrentKungPrefixTree(rewriter, loc, pPrefix, gPrefix);
1088 finalGroup = gPrefix[width - 1];
1089 finalPropagate = pPrefix[width - 1];
1090 break;
1091 }
1092 }
1093
1094 // Final result: `finalGroup` gives us "a < b"
1095 if (includeEq) {
1096 // a <= b iff (a < b) OR (a == b)
1097 // a == b iff `finalPropagate` (all bits are equal)
1098 return comb::OrOp::create(rewriter, loc, finalGroup, finalPropagate);
1099 }
1100 // a < b iff `finalGroup`
1101 return finalGroup;
1102 }
1103
1104 // Construct an unsigned comparator using either ripple-carry or
1105 // parallel-prefix architecture. Comparison uses parallel prefix tree as an
1106 // internal component, so use `AdderArchitecture` enum to select architecture.
1107 static Value constructUnsignedCompare(Operation *op, Location loc, Value a,
1108 Value b, bool isLess, bool includeEq,
1109 ConversionPatternRewriter &rewriter) {
1110 // Ensure a <= b by swapping for simplicity.
1111 if (!isLess)
1112 std::swap(a, b);
1113 auto width = a.getType().getIntOrFloatBitWidth();
1114
1115 // Check if the architecture is specified by an attribute.
1116 auto arch = determineAdderArch(op, width);
1117 if (arch == AdderArchitecture::RippleCarry)
1118 return constructRippleCarry(loc, a, b, includeEq, rewriter);
1119
1120 // For larger widths, use parallel prefix tree
1121 auto aBits = extractBits(rewriter, a);
1122 auto bBits = extractBits(rewriter, b);
1123
1124 // For comparison, we compute:
1125 // - Equal bits: eq_i = ~(a_i ^ b_i)
1126 // - Greater bits: gt_i = ~a_i & b_i (a_i < b_i)
1127 // - Propagate: p_i = eq_i (equality propagates)
1128 // - Generate: g_i = gt_i (greater-than generates)
1129 SmallVector<Value> eq, gt;
1130 eq.reserve(width);
1131 gt.reserve(width);
1132
1133 auto one =
1134 hw::ConstantOp::create(rewriter, loc, rewriter.getIntegerType(1), 1);
1135
1136 for (auto [aBit, bBit] : llvm::zip(aBits, bBits)) {
1137 // eq_i = ~(a_i ^ b_i) = a_i == b_i
1138 auto xorBit = comb::XorOp::create(rewriter, loc, aBit, bBit);
1139 eq.push_back(comb::XorOp::create(rewriter, loc, xorBit, one));
1140
1141 // gt_i = ~a_i & b_i = a_i < b_i
1142 auto notA = comb::XorOp::create(rewriter, loc, aBit, one);
1143 gt.push_back(comb::AndOp::create(rewriter, loc, notA, bBit));
1144 }
1145
1146 return computePrefixComparison(rewriter, loc, std::move(eq), std::move(gt),
1147 includeEq, arch);
1148 }
1149
1150 LogicalResult
1151 matchAndRewrite(ICmpOp op, OpAdaptor adaptor,
1152 ConversionPatternRewriter &rewriter) const override {
1153 auto lhs = adaptor.getLhs();
1154 auto rhs = adaptor.getRhs();
1155
1156 switch (op.getPredicate()) {
1157 default:
1158 return failure();
1159
1160 case ICmpPredicate::eq:
1161 case ICmpPredicate::ceq: {
1162 // a == b ==> ~(a[n] ^ b[n]) & ~(a[n-1] ^ b[n-1]) & ...
1163 auto xorOp = rewriter.createOrFold<comb::XorOp>(op.getLoc(), lhs, rhs);
1164 auto xorBits = extractBits(rewriter, xorOp);
1165 SmallVector<bool> allInverts(xorBits.size(), true);
1166 replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
1167 rewriter, op, xorBits, allInverts);
1168 return success();
1169 }
1170
1171 case ICmpPredicate::ne:
1172 case ICmpPredicate::cne: {
1173 // a != b ==> (a[n] ^ b[n]) | (a[n-1] ^ b[n-1]) | ...
1174 auto xorOp = rewriter.createOrFold<comb::XorOp>(op.getLoc(), lhs, rhs);
1175 replaceOpWithNewOpAndCopyNamehint<comb::OrOp>(
1176 rewriter, op, extractBits(rewriter, xorOp), true);
1177 return success();
1178 }
1179
1180 case ICmpPredicate::uge:
1181 case ICmpPredicate::ugt:
1182 case ICmpPredicate::ule:
1183 case ICmpPredicate::ult: {
1184 bool isLess = op.getPredicate() == ICmpPredicate::ult ||
1185 op.getPredicate() == ICmpPredicate::ule;
1186 bool includeEq = op.getPredicate() == ICmpPredicate::uge ||
1187 op.getPredicate() == ICmpPredicate::ule;
1188 replaceOpAndCopyNamehint(rewriter, op,
1189 constructUnsignedCompare(op, op.getLoc(), lhs,
1190 rhs, isLess, includeEq,
1191 rewriter));
1192 return success();
1193 }
1194 case ICmpPredicate::slt:
1195 case ICmpPredicate::sle:
1196 case ICmpPredicate::sgt:
1197 case ICmpPredicate::sge: {
1198 if (lhs.getType().getIntOrFloatBitWidth() == 0)
1199 return rewriter.notifyMatchFailure(
1200 op.getLoc(), "i0 signed comparison is unsupported");
1201 bool isLess = op.getPredicate() == ICmpPredicate::slt ||
1202 op.getPredicate() == ICmpPredicate::sle;
1203 bool includeEq = op.getPredicate() == ICmpPredicate::sge ||
1204 op.getPredicate() == ICmpPredicate::sle;
1205
1206 // Get a sign bit
1207 auto signA = extractMSB(rewriter, lhs);
1208 auto signB = extractMSB(rewriter, rhs);
1209 auto aRest = extractOtherThanMSB(rewriter, lhs);
1210 auto bRest = extractOtherThanMSB(rewriter, rhs);
1211
1212 // Compare magnitudes (all bits except sign)
1213 auto sameSignResult = constructUnsignedCompare(
1214 op, op.getLoc(), aRest, bRest, isLess, includeEq, rewriter);
1215
1216 // XOR of signs: true if signs are different
1217 auto signsDiffer =
1218 comb::XorOp::create(rewriter, op.getLoc(), signA, signB);
1219
1220 // Result when signs are different
1221 Value diffSignResult = isLess ? signA : signB;
1222
1223 // Final result: choose based on whether signs differ
1224 replaceOpWithNewOpAndCopyNamehint<comb::MuxOp>(
1225 rewriter, op, signsDiffer, diffSignResult, sameSignResult);
1226 return success();
1227 }
1228 }
1229 }
1230};
1231
1232struct CombParityOpConversion : OpConversionPattern<ParityOp> {
1234
1235 LogicalResult
1236 matchAndRewrite(ParityOp op, OpAdaptor adaptor,
1237 ConversionPatternRewriter &rewriter) const override {
1238 // Parity is the XOR of all bits.
1239 replaceOpWithNewOpAndCopyNamehint<comb::XorOp>(
1240 rewriter, op, extractBits(rewriter, adaptor.getInput()), true);
1241 return success();
1242 }
1243};
1244
1245struct CombShlOpConversion : OpConversionPattern<comb::ShlOp> {
1247
1248 LogicalResult
1249 matchAndRewrite(comb::ShlOp op, OpAdaptor adaptor,
1250 ConversionPatternRewriter &rewriter) const override {
1251 auto width = op.getType().getIntOrFloatBitWidth();
1252 auto lhs = adaptor.getLhs();
1253 auto result = createShiftLogic</*isLeftShift=*/true>(
1254 rewriter, op.getLoc(), adaptor.getRhs(), width,
1255 /*getPadding=*/
1256 [&](int64_t index) {
1257 // Don't create zero width value.
1258 if (index == 0)
1259 return Value();
1260 // Padding is 0 for left shift.
1261 return rewriter.createOrFold<hw::ConstantOp>(
1262 op.getLoc(), rewriter.getIntegerType(index), 0);
1263 },
1264 /*getExtract=*/
1265 [&](int64_t index) {
1266 assert(index < width && "index out of bounds");
1267 // Exract the bits from LSB.
1268 return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, 0,
1269 width - index);
1270 });
1271
1272 replaceOpAndCopyNamehint(rewriter, op, result);
1273 return success();
1274 }
1275};
1276
1277struct CombShrUOpConversion : OpConversionPattern<comb::ShrUOp> {
1279
1280 LogicalResult
1281 matchAndRewrite(comb::ShrUOp op, OpAdaptor adaptor,
1282 ConversionPatternRewriter &rewriter) const override {
1283 auto width = op.getType().getIntOrFloatBitWidth();
1284 auto lhs = adaptor.getLhs();
1285 auto result = createShiftLogic</*isLeftShift=*/false>(
1286 rewriter, op.getLoc(), adaptor.getRhs(), width,
1287 /*getPadding=*/
1288 [&](int64_t index) {
1289 // Don't create zero width value.
1290 if (index == 0)
1291 return Value();
1292 // Padding is 0 for right shift.
1293 return rewriter.createOrFold<hw::ConstantOp>(
1294 op.getLoc(), rewriter.getIntegerType(index), 0);
1295 },
1296 /*getExtract=*/
1297 [&](int64_t index) {
1298 assert(index < width && "index out of bounds");
1299 // Exract the bits from MSB.
1300 return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, index,
1301 width - index);
1302 });
1303
1304 replaceOpAndCopyNamehint(rewriter, op, result);
1305 return success();
1306 }
1307};
1308
1309struct CombShrSOpConversion : OpConversionPattern<comb::ShrSOp> {
1311
1312 LogicalResult
1313 matchAndRewrite(comb::ShrSOp op, OpAdaptor adaptor,
1314 ConversionPatternRewriter &rewriter) const override {
1315 auto width = op.getType().getIntOrFloatBitWidth();
1316 if (width == 0)
1317 return rewriter.notifyMatchFailure(op.getLoc(),
1318 "i0 signed shift is unsupported");
1319 auto lhs = adaptor.getLhs();
1320 // Get the sign bit.
1321 auto sign =
1322 rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, width - 1, 1);
1323
1324 // NOTE: The max shift amount is width - 1 because the sign bit is
1325 // already shifted out.
1326 auto result = createShiftLogic</*isLeftShift=*/false>(
1327 rewriter, op.getLoc(), adaptor.getRhs(), width - 1,
1328 /*getPadding=*/
1329 [&](int64_t index) {
1330 return rewriter.createOrFold<comb::ReplicateOp>(op.getLoc(), sign,
1331 index + 1);
1332 },
1333 /*getExtract=*/
1334 [&](int64_t index) {
1335 return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, index,
1336 width - index - 1);
1337 });
1338
1339 replaceOpAndCopyNamehint(rewriter, op, result);
1340 return success();
1341 }
1342};
1343
1344} // namespace
1345
1346//===----------------------------------------------------------------------===//
1347// Convert Comb to AIG pass
1348//===----------------------------------------------------------------------===//
1349
1350namespace {
1351struct ConvertCombToSynthPass
1352 : public impl::ConvertCombToSynthBase<ConvertCombToSynthPass> {
1353 void runOnOperation() override;
1354 using ConvertCombToSynthBase<ConvertCombToSynthPass>::ConvertCombToSynthBase;
1355};
1356} // namespace
1357
1358static void
1360 uint32_t maxEmulationUnknownBits,
1361 bool forceAIG) {
1362 patterns.add<
1363 // Bitwise Logical Ops
1364 CombAndOpConversion, CombMuxOpConversion, CombParityOpConversion,
1365 CombXorOpToSynthConversion,
1366 // Arithmetic Ops
1367 CombMulOpConversion, CombICmpOpConversion,
1368 // Shift Ops
1369 CombShlOpConversion, CombShrUOpConversion, CombShrSOpConversion,
1370 // Variadic ops that must be lowered to binary operations
1371 CombLowerVariadicOp<AddOp>, CombLowerVariadicOp<MulOp>>(
1372 patterns.getContext());
1373
1374 if (forceAIG)
1375 patterns.add<SynthXorInverterOpConversion>(patterns.getContext());
1376
1377 patterns.add(comb::convertSubToAdd);
1378
1379 patterns.add<CombOrToAIGConversion, CombAddOpConversion>(
1380 patterns.getContext());
1381 synth::populateVariadicAndInverterLoweringPatterns(patterns);
1382
1383 if (forceAIG)
1384 synth::populateVariadicXorInverterLoweringPatterns(patterns);
1385
1386 // Add div/mod patterns with a threshold given by the pass option.
1387 patterns.add<CombDivUOpConversion, CombModUOpConversion, CombDivSOpConversion,
1388 CombModSOpConversion>(patterns.getContext(),
1389 maxEmulationUnknownBits);
1390}
1391
1392void ConvertCombToSynthPass::runOnOperation() {
1393 ConversionTarget target(getContext());
1394
1395 // Comb is source dialect.
1396 target.addIllegalDialect<comb::CombDialect>();
1397 // Keep data movement operations like Extract, Concat and Replicate.
1398 target.addLegalOp<comb::ExtractOp, comb::ConcatOp, comb::ReplicateOp,
1400
1401 // Treat array operations as illegal. Strictly speaking, other than array
1402 // get operation with non-const index are legal in AIG but array types
1403 // prevent a bunch of optimizations so just lower them to integer
1404 // operations. It's required to run HWAggregateToComb pass before this pass.
1406 hw::AggregateConstantOp>();
1407
1408 target.addLegalDialect<synth::SynthDialect>();
1409 if (forceAIG)
1410 target.addIllegalOp<synth::XorInverterOp>();
1411
1412 // If additional legal ops are specified, add them to the target.
1413 if (!additionalLegalOps.empty())
1414 for (const auto &opName : additionalLegalOps)
1415 target.addLegalOp(OperationName(opName, &getContext()));
1416
1417 RewritePatternSet patterns(&getContext());
1418 populateCombToAIGConversionPatterns(patterns, maxEmulationUnknownBits,
1419 forceAIG);
1420
1421 if (failed(mlir::applyPartialConversion(getOperation(), target,
1422 std::move(patterns))))
1423 return signalPassFailure();
1424}
assert(baseType &&"element must be base type")
static SmallVector< Value > extractBits(OpBuilder &builder, Value val)
static Value createShiftLogic(ConversionPatternRewriter &rewriter, Location loc, Value shiftAmount, int64_t maxShiftAmount, llvm::function_ref< Value(int64_t)> getPadding, llvm::function_ref< Value(int64_t)> getExtract)
static APInt substitueMaskToValues(size_t width, llvm::SmallVectorImpl< ConstantOrValue > &constantOrValues, uint32_t mask)
static LogicalResult emulateBinaryOpForUnknownBits(ConversionPatternRewriter &rewriter, int64_t maxEmulationUnknownBits, Operation *op, llvm::function_ref< APInt(const APInt &, const APInt &)> emulate)
static int64_t getNumUnknownBitsAndPopulateValues(Value value, llvm::SmallVectorImpl< ConstantOrValue > &values)
static Value createMajorityFunction(OpBuilder &rewriter, Location loc, Value a, Value b, Value carry)
static Value extractOtherThanMSB(OpBuilder &builder, Value val)
static Value extractMSB(OpBuilder &builder, Value val)
static void populateCombToAIGConversionPatterns(RewritePatternSet &patterns, uint32_t maxEmulationUnknownBits, bool forceAIG)
static std::unique_ptr< Context > context
static std::optional< APSInt > getConstant(Attribute operand)
Determine the value of a constant operand for the sake of constant folding.
static Value lowerFullyAssociativeOp(Operation &op, OperandRange operands, SmallVector< Operation * > &newOps)
Lower a variadic fully-associative operation into an expression tree.
create(data_type, value)
Definition hw.py:441
create(data_type, value)
Definition hw.py:433
The InstanceGraph op interface, see InstanceGraphInterface.td for more details.
void replaceOpAndCopyNamehint(PatternRewriter &rewriter, Operation *op, Value newValue)
A wrapper of PatternRewriter::replaceOp to propagate "sv.namehint" attribute.
Definition Naming.cpp:73
Definition comb.py:1