Loading [MathJax]/extensions/tex2jax.js
CIRCT 22.0.0git
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
CombToAIG.cpp
Go to the documentation of this file.
1//===- CombToAIG.cpp - Comb to AIG Conversion Pass --------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the main Comb to AIG Conversion Pass Implementation.
10//
11//===----------------------------------------------------------------------===//
12
18#include "mlir/Pass/Pass.h"
19#include "mlir/Transforms/DialectConversion.h"
20#include "llvm/ADT/PointerUnion.h"
21#include "llvm/Support/Debug.h"
22
23#define DEBUG_TYPE "comb-to-aig"
24
25namespace circt {
26#define GEN_PASS_DEF_CONVERTCOMBTOAIG
27#include "circt/Conversion/Passes.h.inc"
28} // namespace circt
29
30using namespace circt;
31using namespace comb;
32
33//===----------------------------------------------------------------------===//
34// Utility Functions
35//===----------------------------------------------------------------------===//
36
37// A wrapper for comb::extractBits that returns a SmallVector<Value>.
38static SmallVector<Value> extractBits(OpBuilder &builder, Value val) {
39 SmallVector<Value> bits;
40 comb::extractBits(builder, val, bits);
41 return bits;
42}
43
// Construct a mux tree for shift operations. `isLeftShift` controls the
// direction of the shift operation and is used to determine order of the
// padding and extracted bits. Callbacks `getPadding` and `getExtract` are used
// to get the padding and extracted bits for each shift amount. `getPadding`
// could return a nullptr as i0 value but except for that, these callbacks must
// return a valid value for each shift amount in the range [0, maxShiftAmount].
// The value for `maxShiftAmount` is used as the out-of-bounds value.
template <bool isLeftShift>
static Value createShiftLogic(ConversionPatternRewriter &rewriter, Location loc,
                              Value shiftAmount, int64_t maxShiftAmount,
                              llvm::function_ref<Value(int64_t)> getPadding,
                              llvm::function_ref<Value(int64_t)> getExtract) {
  // Extract individual bits from shift amount; these become the selectors of
  // the mux tree below.
  auto bits = extractBits(rewriter, shiftAmount);

  // Create nodes for each possible shift amount.
  SmallVector<Value> nodes;
  nodes.reserve(maxShiftAmount);
  for (int64_t i = 0; i < maxShiftAmount; ++i) {
    Value extract = getExtract(i);
    Value padding = getPadding(i);

    // A null padding means the padding would be zero-width (i0) for this
    // shift amount, so the extracted bits alone form the candidate result.
    if (!padding) {
      nodes.push_back(extract);
      continue;
    }

    // Concatenate extracted bits with padding. For a left shift the padding
    // fills the low end; for a right shift it fills the high end.
    if (isLeftShift)
      nodes.push_back(
          rewriter.createOrFold<comb::ConcatOp>(loc, extract, padding));
    else
      nodes.push_back(
          rewriter.createOrFold<comb::ConcatOp>(loc, padding, extract));
  }

  // Create out-of-bounds value: full padding at `maxShiftAmount`.
  auto outOfBoundsValue = getPadding(maxShiftAmount);
  assert(outOfBoundsValue && "outOfBoundsValue must be valid");

  // Construct mux tree for shift operation, selected by the shift-amount bits.
  auto result =
      comb::constructMuxTree(rewriter, loc, bits, nodes, outOfBoundsValue);

  // Add bounds checking: shift amounts >= maxShiftAmount select the
  // out-of-bounds value regardless of the mux tree output.
  auto inBound = rewriter.createOrFold<comb::ICmpOp>(
      loc, ICmpPredicate::ult, shiftAmount,
      hw::ConstantOp::create(rewriter, loc, shiftAmount.getType(),
                             maxShiftAmount));

  return rewriter.createOrFold<comb::MuxOp>(loc, inBound, result,
                                            outOfBoundsValue);
}
97
namespace {
// A union of Value and IntegerAttr to cleanly handle constant values: a
// constant segment is stored as its IntegerAttr, anything else as the Value.
using ConstantOrValue = llvm::PointerUnion<Value, mlir::IntegerAttr>;
} // namespace
102
103// Return the number of unknown bits and populate the concatenated values.
105 Value value, llvm::SmallVectorImpl<ConstantOrValue> &values) {
106 // Constant or zero width value are all known.
107 if (value.getType().isInteger(0))
108 return 0;
109
110 // Recursively count unknown bits for concat.
111 if (auto concat = value.getDefiningOp<comb::ConcatOp>()) {
112 int64_t totalUnknownBits = 0;
113 for (auto concatInput : llvm::reverse(concat.getInputs())) {
114 auto unknownBits =
115 getNumUnknownBitsAndPopulateValues(concatInput, values);
116 if (unknownBits < 0)
117 return unknownBits;
118 totalUnknownBits += unknownBits;
119 }
120 return totalUnknownBits;
121 }
122
123 // Constant value is known.
124 if (auto constant = value.getDefiningOp<hw::ConstantOp>()) {
125 values.push_back(constant.getValueAttr());
126 return 0;
127 }
128
129 // Consider other operations as unknown bits.
130 // TODO: We can handle replicate, extract, etc.
131 values.push_back(value);
132 return hw::getBitWidth(value.getType());
133}
134
135// Return a value that substitutes the unknown bits with the mask.
136static APInt
138 llvm::SmallVectorImpl<ConstantOrValue> &constantOrValues,
139 uint32_t mask) {
140 uint32_t bitPos = 0, unknownPos = 0;
141 APInt result(width, 0);
142 for (auto constantOrValue : constantOrValues) {
143 int64_t elemWidth;
144 if (auto constant = dyn_cast<IntegerAttr>(constantOrValue)) {
145 elemWidth = constant.getValue().getBitWidth();
146 result.insertBits(constant.getValue(), bitPos);
147 } else {
148 elemWidth = hw::getBitWidth(cast<Value>(constantOrValue).getType());
149 assert(elemWidth >= 0 && "unknown bit width");
150 assert(elemWidth + unknownPos < 32 && "unknown bit width too large");
151 // Create a mask for the unknown bits.
152 uint32_t usedBits = (mask >> unknownPos) & ((1 << elemWidth) - 1);
153 result.insertBits(APInt(elemWidth, usedBits), bitPos);
154 unknownPos += elemWidth;
155 }
156 bitPos += elemWidth;
157 }
158
159 return result;
160}
161
// Emulate a binary operation with unknown bits using a table lookup.
// This function enumerates all possible combinations of unknown bits and
// emulates the operation for each combination, then selects the result with a
// mux tree driven by the unknown bits themselves. Fails (returns failure())
// when a bit width cannot be determined or when the number of unknown bits
// exceeds `maxEmulationUnknownBits`.
static LogicalResult emulateBinaryOpForUnknownBits(
    ConversionPatternRewriter &rewriter, int64_t maxEmulationUnknownBits,
    Operation *op,
    llvm::function_ref<APInt(const APInt &, const APInt &)> emulate) {
  SmallVector<ConstantOrValue> lhsValues, rhsValues;

  assert(op->getNumResults() == 1 && op->getNumOperands() == 2 &&
         "op must be a single result binary operation");

  auto lhs = op->getOperand(0);
  auto rhs = op->getOperand(1);
  auto width = op->getResult(0).getType().getIntOrFloatBitWidth();
  auto loc = op->getLoc();
  auto numLhsUnknownBits = getNumUnknownBitsAndPopulateValues(lhs, lhsValues);
  auto numRhsUnknownBits = getNumUnknownBitsAndPopulateValues(rhs, rhsValues);

  // If unknown bit width is detected, abort the lowering.
  if (numLhsUnknownBits < 0 || numRhsUnknownBits < 0)
    return failure();

  int64_t totalUnknownBits = numLhsUnknownBits + numRhsUnknownBits;
  if (totalUnknownBits > maxEmulationUnknownBits)
    return failure();

  // One emulated result per combination of unknown bits.
  SmallVector<Value> emulatedResults;
  emulatedResults.reserve(1 << totalUnknownBits);

  // Emulate all possible cases. Constants are pooled so identical results
  // share a single hw.constant.
  DenseMap<IntegerAttr, hw::ConstantOp> constantPool;
  auto getConstant = [&](const APInt &value) -> hw::ConstantOp {
    auto attr = rewriter.getIntegerAttr(rewriter.getIntegerType(width), value);
    auto it = constantPool.find(attr);
    if (it != constantPool.end())
      return it->second;
    auto constant = hw::ConstantOp::create(rewriter, loc, value);
    constantPool[attr] = constant;
    return constant;
  };

  // rhs varies fastest; this must match the selector order built below
  // (rhs bits first).
  for (uint32_t lhsMask = 0, lhsMaskEnd = 1 << numLhsUnknownBits;
       lhsMask < lhsMaskEnd; ++lhsMask) {
    APInt lhsValue = substitueMaskToValues(width, lhsValues, lhsMask);
    for (uint32_t rhsMask = 0, rhsMaskEnd = 1 << numRhsUnknownBits;
         rhsMask < rhsMaskEnd; ++rhsMask) {
      APInt rhsValue = substitueMaskToValues(width, rhsValues, rhsMask);
      // Emulate.
      emulatedResults.push_back(getConstant(emulate(lhsValue, rhsValue)));
    }
  }

  // Create selectors for mux tree.
  SmallVector<Value> selectors;
  selectors.reserve(totalUnknownBits);
  for (auto &concatedValues : {rhsValues, lhsValues})
    for (auto valueOrConstant : concatedValues) {
      auto value = dyn_cast<Value>(valueOrConstant);
      if (!value)
        continue;
      extractBits(rewriter, value, selectors);
    }

  assert(totalUnknownBits == static_cast<int64_t>(selectors.size()) &&
         "number of selectors must match");
  auto muxed = constructMuxTree(rewriter, loc, selectors, emulatedResults,
                                getConstant(APInt::getZero(width)));

  replaceOpAndCopyNamehint(rewriter, op, muxed);
  return success();
}
234
235//===----------------------------------------------------------------------===//
236// Conversion patterns
237//===----------------------------------------------------------------------===//
238
239namespace {
240
241/// Lower a comb::AndOp operation to aig::AndInverterOp
242struct CombAndOpConversion : OpConversionPattern<AndOp> {
244
245 LogicalResult
246 matchAndRewrite(AndOp op, OpAdaptor adaptor,
247 ConversionPatternRewriter &rewriter) const override {
248 SmallVector<bool> nonInverts(adaptor.getInputs().size(), false);
249 replaceOpWithNewOpAndCopyNamehint<aig::AndInverterOp>(
250 rewriter, op, adaptor.getInputs(), nonInverts);
251 return success();
252 }
253};
254
255/// Lower a comb::OrOp operation to aig::AndInverterOp with invert flags
256struct CombOrOpConversion : OpConversionPattern<OrOp> {
258
259 LogicalResult
260 matchAndRewrite(OrOp op, OpAdaptor adaptor,
261 ConversionPatternRewriter &rewriter) const override {
262 // Implement Or using And and invert flags: a | b = ~(~a & ~b)
263 SmallVector<bool> allInverts(adaptor.getInputs().size(), true);
264 auto andOp = aig::AndInverterOp::create(rewriter, op.getLoc(),
265 adaptor.getInputs(), allInverts);
266 replaceOpWithNewOpAndCopyNamehint<aig::AndInverterOp>(rewriter, op, andOp,
267 /*invert=*/true);
268 return success();
269 }
270};
271
272/// Lower a comb::XorOp operation to AIG operations
273struct CombXorOpConversion : OpConversionPattern<XorOp> {
275
276 LogicalResult
277 matchAndRewrite(XorOp op, OpAdaptor adaptor,
278 ConversionPatternRewriter &rewriter) const override {
279 if (op.getNumOperands() != 2)
280 return failure();
281 // Xor using And with invert flags: a ^ b = (a | b) & (~a | ~b)
282
283 // (a | b) = ~(~a & ~b)
284 // (~a | ~b) = ~(a & b)
285 auto inputs = adaptor.getInputs();
286 SmallVector<bool> allInverts(inputs.size(), true);
287 SmallVector<bool> allNotInverts(inputs.size(), false);
288
289 auto notAAndNotB =
290 aig::AndInverterOp::create(rewriter, op.getLoc(), inputs, allInverts);
291 auto aAndB = aig::AndInverterOp::create(rewriter, op.getLoc(), inputs,
292 allNotInverts);
293
294 replaceOpWithNewOpAndCopyNamehint<aig::AndInverterOp>(rewriter, op,
295 notAAndNotB, aAndB,
296 /*lhs_invert=*/true,
297 /*rhs_invert=*/true);
298 return success();
299 }
300};
301
302template <typename OpTy>
303struct CombLowerVariadicOp : OpConversionPattern<OpTy> {
305 using OpAdaptor = typename OpConversionPattern<OpTy>::OpAdaptor;
306 LogicalResult
307 matchAndRewrite(OpTy op, OpAdaptor adaptor,
308 ConversionPatternRewriter &rewriter) const override {
309 auto result = lowerFullyAssociativeOp(op, op.getOperands(), rewriter);
310 replaceOpAndCopyNamehint(rewriter, op, result);
311 return success();
312 }
313
314 static Value lowerFullyAssociativeOp(OpTy op, OperandRange operands,
315 ConversionPatternRewriter &rewriter) {
316 Value lhs, rhs;
317 switch (operands.size()) {
318 case 0:
319 llvm_unreachable("cannot be called with empty operand range");
320 break;
321 case 1:
322 return operands[0];
323 case 2:
324 lhs = operands[0];
325 rhs = operands[1];
326 return OpTy::create(rewriter, op.getLoc(), ValueRange{lhs, rhs}, true);
327 default:
328 auto firstHalf = operands.size() / 2;
329 lhs =
330 lowerFullyAssociativeOp(op, operands.take_front(firstHalf), rewriter);
331 rhs =
332 lowerFullyAssociativeOp(op, operands.drop_front(firstHalf), rewriter);
333 return OpTy::create(rewriter, op.getLoc(), ValueRange{lhs, rhs}, true);
334 }
335 }
336};
337
338// Lower comb::MuxOp to AIG operations.
339struct CombMuxOpConversion : OpConversionPattern<MuxOp> {
341
342 LogicalResult
343 matchAndRewrite(MuxOp op, OpAdaptor adaptor,
344 ConversionPatternRewriter &rewriter) const override {
345 // Implement: c ? a : b = (replicate(c) & a) | (~replicate(c) & b)
346
347 Value cond = op.getCond();
348 auto trueVal = op.getTrueValue();
349 auto falseVal = op.getFalseValue();
350
351 if (!op.getType().isInteger()) {
352 // If the type of the mux is not integer, bitcast the operands first.
353 auto widthType = rewriter.getIntegerType(hw::getBitWidth(op.getType()));
354 trueVal =
355 hw::BitcastOp::create(rewriter, op->getLoc(), widthType, trueVal);
356 falseVal =
357 hw::BitcastOp::create(rewriter, op->getLoc(), widthType, falseVal);
358 }
359
360 // Replicate condition if needed
361 if (!trueVal.getType().isInteger(1))
362 cond = comb::ReplicateOp::create(rewriter, op.getLoc(), trueVal.getType(),
363 cond);
364
365 // c ? a : b => (replicate(c) & a) | (~replicate(c) & b)
366 auto lhs = aig::AndInverterOp::create(rewriter, op.getLoc(), cond, trueVal);
367 auto rhs = aig::AndInverterOp::create(rewriter, op.getLoc(), cond, falseVal,
368 true, false);
369
370 Value result = comb::OrOp::create(rewriter, op.getLoc(), lhs, rhs);
371 // Insert the bitcast if the type of the mux is not integer.
372 if (result.getType() != op.getType())
373 result =
374 hw::BitcastOp::create(rewriter, op.getLoc(), op.getType(), result);
375 replaceOpAndCopyNamehint(rewriter, op, result);
376 return success();
377 }
378};
379
380struct CombAddOpConversion : OpConversionPattern<AddOp> {
382 LogicalResult
383 matchAndRewrite(AddOp op, OpAdaptor adaptor,
384 ConversionPatternRewriter &rewriter) const override {
385 auto inputs = adaptor.getInputs();
386 // Lower only when there are two inputs.
387 // Variadic operands must be lowered in a different pattern.
388 if (inputs.size() != 2)
389 return failure();
390
391 auto width = op.getType().getIntOrFloatBitWidth();
392 // Skip a zero width value.
393 if (width == 0) {
394 replaceOpWithNewOpAndCopyNamehint<hw::ConstantOp>(rewriter, op,
395 op.getType(), 0);
396 return success();
397 }
398
399 if (width < 8)
400 lowerRippleCarryAdder(op, inputs, rewriter);
401 else
402 lowerParallelPrefixAdder(op, inputs, rewriter);
403
404 return success();
405 }
406
407 // Implement a basic ripple-carry adder for small bitwidths.
408 void lowerRippleCarryAdder(comb::AddOp op, ValueRange inputs,
409 ConversionPatternRewriter &rewriter) const {
410 auto width = op.getType().getIntOrFloatBitWidth();
411 // Implement a naive Ripple-carry full adder.
412 Value carry;
413
414 auto aBits = extractBits(rewriter, inputs[0]);
415 auto bBits = extractBits(rewriter, inputs[1]);
416 SmallVector<Value> results;
417 results.resize(width);
418 for (int64_t i = 0; i < width; ++i) {
419 SmallVector<Value> xorOperands = {aBits[i], bBits[i]};
420 if (carry)
421 xorOperands.push_back(carry);
422
423 // sum[i] = xor(carry[i-1], a[i], b[i])
424 // NOTE: The result is stored in reverse order.
425 results[width - i - 1] =
426 comb::XorOp::create(rewriter, op.getLoc(), xorOperands, true);
427
428 // If this is the last bit, we are done.
429 if (i == width - 1)
430 break;
431
432 // carry[i] = (carry[i-1] & (a[i] ^ b[i])) | (a[i] & b[i])
433 Value nextCarry = comb::AndOp::create(
434 rewriter, op.getLoc(), ValueRange{aBits[i], bBits[i]}, true);
435 if (!carry) {
436 // This is the first bit, so the carry is the next carry.
437 carry = nextCarry;
438 continue;
439 }
440
441 auto aXnorB = comb::XorOp::create(rewriter, op.getLoc(),
442 ValueRange{aBits[i], bBits[i]}, true);
443 auto andOp = comb::AndOp::create(rewriter, op.getLoc(),
444 ValueRange{carry, aXnorB}, true);
445 carry = comb::OrOp::create(rewriter, op.getLoc(),
446 ValueRange{andOp, nextCarry}, true);
447 }
448 LLVM_DEBUG(llvm::dbgs() << "Lower comb.add to Ripple-Carry Adder of width "
449 << width << "\n");
450
451 replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(rewriter, op, results);
452 }
453
454 // Implement a parallel prefix adder - with Kogge-Stone or Brent-Kung trees
455 // Will introduce unused signals for the carry bits but these will be removed
456 // by the AIG pass.
457 void lowerParallelPrefixAdder(comb::AddOp op, ValueRange inputs,
458 ConversionPatternRewriter &rewriter) const {
459 auto width = op.getType().getIntOrFloatBitWidth();
460
461 auto aBits = extractBits(rewriter, inputs[0]);
462 auto bBits = extractBits(rewriter, inputs[1]);
463 // Construct propagate (p) and generate (g) signals
464 SmallVector<Value> p, g;
465 p.reserve(width);
466 g.reserve(width);
467
468 for (auto [aBit, bBit] : llvm::zip(aBits, bBits)) {
469 // p_i = a_i XOR b_i
470 p.push_back(comb::XorOp::create(rewriter, op.getLoc(), aBit, bBit));
471 // g_i = a_i AND b_i
472 g.push_back(comb::AndOp::create(rewriter, op.getLoc(), aBit, bBit));
473 }
474
475 LLVM_DEBUG({
476 llvm::dbgs() << "Lower comb.add to Parallel-Prefix of width " << width
477 << "\n--------------------------------------- Init\n";
478
479 for (int64_t i = 0; i < width; ++i) {
480 // p_i = a_i XOR b_i
481 llvm::dbgs() << "P0" << i << " = A" << i << " XOR B" << i << "\n";
482 // g_i = a_i AND b_i
483 llvm::dbgs() << "G0" << i << " = A" << i << " AND B" << i << "\n";
484 }
485 });
486
487 // Create copies of p and g for the prefix computation
488 SmallVector<Value> pPrefix = p;
489 SmallVector<Value> gPrefix = g;
490 if (width < 32)
491 lowerKoggeStonePrefixTree(op, inputs, rewriter, pPrefix, gPrefix);
492 else
493 lowerBrentKungPrefixTree(op, inputs, rewriter, pPrefix, gPrefix);
494
495 // Generate result sum bits
496 // NOTE: The result is stored in reverse order.
497 SmallVector<Value> results;
498 results.resize(width);
499 // Sum bit 0 is just p[0] since carry_in = 0
500 results[width - 1] = p[0];
501
502 // For remaining bits, sum_i = p_i XOR c_(i-1)
503 // The carry into position i is the group generate from position i-1
504 for (int64_t i = 1; i < width; ++i)
505 results[width - 1 - i] =
506 comb::XorOp::create(rewriter, op.getLoc(), p[i], gPrefix[i - 1]);
507
508 replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(rewriter, op, results);
509
510 LLVM_DEBUG({
511 llvm::dbgs() << "--------------------------------------- Completion\n"
512 << "RES0 = P0\n";
513 for (int64_t i = 1; i < width; ++i)
514 llvm::dbgs() << "RES" << i << " = P" << i << " XOR G" << i - 1 << "\n";
515 });
516 }
517
518 // Implement the Kogge-Stone parallel prefix tree
519 // Described in https://en.wikipedia.org/wiki/Kogge%E2%80%93Stone_adder
520 // Slightly better delay than Brent-Kung, but more area.
521 void lowerKoggeStonePrefixTree(comb::AddOp op, ValueRange inputs,
522 ConversionPatternRewriter &rewriter,
523 SmallVector<Value> &pPrefix,
524 SmallVector<Value> &gPrefix) const {
525 auto width = op.getType().getIntOrFloatBitWidth();
526
527 // Kogge-Stone parallel prefix computation
528 for (int64_t stride = 1; stride < width; stride *= 2) {
529 for (int64_t i = stride; i < width; ++i) {
530 int64_t j = i - stride;
531 // Group generate: g_i OR (p_i AND g_j)
532 Value andPG =
533 comb::AndOp::create(rewriter, op.getLoc(), pPrefix[i], gPrefix[j]);
534 gPrefix[i] =
535 comb::OrOp::create(rewriter, op.getLoc(), gPrefix[i], andPG);
536
537 // Group propagate: p_i AND p_j
538 pPrefix[i] =
539 comb::AndOp::create(rewriter, op.getLoc(), pPrefix[i], pPrefix[j]);
540 }
541 }
542 LLVM_DEBUG({
543 int64_t stage = 0;
544 for (int64_t stride = 1; stride < width; stride *= 2) {
545 llvm::dbgs()
546 << "--------------------------------------- Kogge-Stone Stage "
547 << stage << "\n";
548 for (int64_t i = stride; i < width; ++i) {
549 int64_t j = i - stride;
550 // Group generate: g_i OR (p_i AND g_j)
551 llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
552 << " OR (P" << i << stage << " AND G" << j << stage
553 << ")\n";
554
555 // Group propagate: p_i AND p_j
556 llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
557 << " AND P" << j << stage << "\n";
558 }
559 ++stage;
560 }
561 });
562 }
563
564 // Implement the Brent-Kung parallel prefix tree
565 // Described in https://en.wikipedia.org/wiki/Brent%E2%80%93Kung_adder
566 // Slightly worse delay than Kogge-Stone, but less area.
567 void lowerBrentKungPrefixTree(comb::AddOp op, ValueRange inputs,
568 ConversionPatternRewriter &rewriter,
569 SmallVector<Value> &pPrefix,
570 SmallVector<Value> &gPrefix) const {
571 auto width = op.getType().getIntOrFloatBitWidth();
572
573 // Brent-Kung parallel prefix computation
574 // Forward phase
575 int64_t stride;
576 for (stride = 1; stride < width; stride *= 2) {
577 for (int64_t i = stride * 2 - 1; i < width; i += stride * 2) {
578 int64_t j = i - stride;
579
580 // Group generate: g_i OR (p_i AND g_j)
581 Value andPG =
582 comb::AndOp::create(rewriter, op.getLoc(), pPrefix[i], gPrefix[j]);
583 gPrefix[i] =
584 comb::OrOp::create(rewriter, op.getLoc(), gPrefix[i], andPG);
585
586 // Group propagate: p_i AND p_j
587 pPrefix[i] =
588 comb::AndOp::create(rewriter, op.getLoc(), pPrefix[i], pPrefix[j]);
589 }
590 }
591
592 // Backward phase
593 for (; stride > 0; stride /= 2) {
594 for (int64_t i = stride * 3 - 1; i < width; i += stride * 2) {
595 int64_t j = i - stride;
596
597 // Group generate: g_i OR (p_i AND g_j)
598 Value andPG =
599 comb::AndOp::create(rewriter, op.getLoc(), pPrefix[i], gPrefix[j]);
600 gPrefix[i] = OrOp::create(rewriter, op.getLoc(), gPrefix[i], andPG);
601
602 // Group propagate: p_i AND p_j
603 pPrefix[i] =
604 comb::AndOp::create(rewriter, op.getLoc(), pPrefix[i], pPrefix[j]);
605 }
606 }
607
608 LLVM_DEBUG({
609 int64_t stage = 0;
610 for (stride = 1; stride < width; stride *= 2) {
611 llvm::dbgs() << "--------------------------------------- Brent-Kung FW "
612 << stage << " : Stride " << stride << "\n";
613 for (int64_t i = stride * 2 - 1; i < width; i += stride * 2) {
614 int64_t j = i - stride;
615
616 // Group generate: g_i OR (p_i AND g_j)
617 llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
618 << " OR (P" << i << stage << " AND G" << j << stage
619 << ")\n";
620
621 // Group propagate: p_i AND p_j
622 llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
623 << " AND P" << j << stage << "\n";
624 }
625 ++stage;
626 }
627
628 for (; stride > 0; stride /= 2) {
629 if (stride * 3 - 1 < width)
630 llvm::dbgs()
631 << "--------------------------------------- Brent-Kung BW "
632 << stage << " : Stride " << stride << "\n";
633
634 for (int64_t i = stride * 3 - 1; i < width; i += stride * 2) {
635 int64_t j = i - stride;
636
637 // Group generate: g_i OR (p_i AND g_j)
638 llvm::dbgs() << "G" << i << stage + 1 << " = G" << i << stage
639 << " OR (P" << i << stage << " AND G" << j << stage
640 << ")\n";
641
642 // Group propagate: p_i AND p_j
643 llvm::dbgs() << "P" << i << stage + 1 << " = P" << i << stage
644 << " AND P" << j << stage << "\n";
645 }
646 --stage;
647 }
648 });
649 }
650};
651
652struct CombSubOpConversion : OpConversionPattern<SubOp> {
654 LogicalResult
655 matchAndRewrite(SubOp op, OpAdaptor adaptor,
656 ConversionPatternRewriter &rewriter) const override {
657 auto lhs = op.getLhs();
658 auto rhs = op.getRhs();
659 // Since `-rhs = ~rhs + 1` holds, rewrite `sub(lhs, rhs)` to:
660 // sub(lhs, rhs) => add(lhs, -rhs) => add(lhs, add(~rhs, 1))
661 // => add(lhs, ~rhs, 1)
662 auto notRhs = aig::AndInverterOp::create(rewriter, op.getLoc(), rhs,
663 /*invert=*/true);
664 auto one = hw::ConstantOp::create(rewriter, op.getLoc(), op.getType(), 1);
665 replaceOpWithNewOpAndCopyNamehint<comb::AddOp>(
666 rewriter, op, ValueRange{lhs, notRhs, one}, true);
667 return success();
668 }
669};
670
671struct CombMulOpConversion : OpConversionPattern<MulOp> {
673 using OpAdaptor = typename OpConversionPattern<MulOp>::OpAdaptor;
674 LogicalResult
675 matchAndRewrite(MulOp op, OpAdaptor adaptor,
676 ConversionPatternRewriter &rewriter) const override {
677 if (adaptor.getInputs().size() != 2)
678 return failure();
679
680 Location loc = op.getLoc();
681 Value a = adaptor.getInputs()[0];
682 Value b = adaptor.getInputs()[1];
683 unsigned width = op.getType().getIntOrFloatBitWidth();
684
685 // Skip a zero width value.
686 if (width == 0) {
687 rewriter.replaceOpWithNewOp<hw::ConstantOp>(op, op.getType(), 0);
688 return success();
689 }
690
691 // Extract individual bits from operands
692 SmallVector<Value> aBits = extractBits(rewriter, a);
693 SmallVector<Value> bBits = extractBits(rewriter, b);
694
695 auto falseValue = hw::ConstantOp::create(rewriter, loc, APInt(1, 0));
696
697 // Generate partial products
698 SmallVector<SmallVector<Value>> partialProducts;
699 partialProducts.reserve(width);
700 for (unsigned i = 0; i < width; ++i) {
701 SmallVector<Value> row(i, falseValue);
702 row.reserve(width);
703 // Generate partial product bits
704 for (unsigned j = 0; i + j < width; ++j)
705 row.push_back(
706 rewriter.createOrFold<comb::AndOp>(loc, aBits[j], bBits[i]));
707
708 partialProducts.push_back(row);
709 }
710
711 // If the width is 1, we are done.
712 if (width == 1) {
713 rewriter.replaceOp(op, partialProducts[0][0]);
714 return success();
715 }
716
717 // Wallace tree reduction - reduce to two addends.
718 auto addends =
719 comb::wallaceReduction(rewriter, loc, width, 2, partialProducts);
720 // Sum the two addends using a carry-propagate adder
721 auto newAdd = comb::AddOp::create(rewriter, loc, addends, true);
722 replaceOpAndCopyNamehint(rewriter, op, newAdd);
723 return success();
724 }
725};
726
// Common base for division/modulus conversions: carries the per-pass limit on
// how many unknown bits the table-lookup emulation may enumerate.
template <typename OpTy>
struct DivModOpConversionBase : OpConversionPattern<OpTy> {
  DivModOpConversionBase(MLIRContext *context, int64_t maxEmulationUnknownBits)
      : OpConversionPattern<OpTy>(context),
        maxEmulationUnknownBits(maxEmulationUnknownBits) {
    // The emulation builds masks in 32-bit integers, so the bound must stay
    // below 32 (see substitueMaskToValues).
    assert(maxEmulationUnknownBits < 32 &&
           "maxEmulationUnknownBits must be less than 32");
  }
  // Upper bound on unknown bits tolerated by emulateBinaryOpForUnknownBits.
  const int64_t maxEmulationUnknownBits;
};
737
738struct CombDivUOpConversion : DivModOpConversionBase<DivUOp> {
739 using DivModOpConversionBase<DivUOp>::DivModOpConversionBase;
740 LogicalResult
741 matchAndRewrite(DivUOp op, OpAdaptor adaptor,
742 ConversionPatternRewriter &rewriter) const override {
743 // Check if the divisor is a power of two.
744 if (auto rhsConstantOp = adaptor.getRhs().getDefiningOp<hw::ConstantOp>())
745 if (rhsConstantOp.getValue().isPowerOf2()) {
746 // Extract upper bits.
747 size_t extractAmount = rhsConstantOp.getValue().ceilLogBase2();
748 size_t width = op.getType().getIntOrFloatBitWidth();
749 Value upperBits = rewriter.createOrFold<comb::ExtractOp>(
750 op.getLoc(), adaptor.getLhs(), extractAmount,
751 width - extractAmount);
752 Value constZero = hw::ConstantOp::create(rewriter, op.getLoc(),
753 APInt::getZero(extractAmount));
754 replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(
755 rewriter, op, op.getType(), ArrayRef<Value>{constZero, upperBits});
756 return success();
757 }
758
759 // When rhs is not power of two and the number of unknown bits are small,
760 // create a mux tree that emulates all possible cases.
762 rewriter, maxEmulationUnknownBits, op,
763 [](const APInt &lhs, const APInt &rhs) {
764 // Division by zero is undefined, just return zero.
765 if (rhs.isZero())
766 return APInt::getZero(rhs.getBitWidth());
767 return lhs.udiv(rhs);
768 });
769 }
770};
771
772struct CombModUOpConversion : DivModOpConversionBase<ModUOp> {
773 using DivModOpConversionBase<ModUOp>::DivModOpConversionBase;
774 LogicalResult
775 matchAndRewrite(ModUOp op, OpAdaptor adaptor,
776 ConversionPatternRewriter &rewriter) const override {
777 // Check if the divisor is a power of two.
778 if (auto rhsConstantOp = adaptor.getRhs().getDefiningOp<hw::ConstantOp>())
779 if (rhsConstantOp.getValue().isPowerOf2()) {
780 // Extract lower bits.
781 size_t extractAmount = rhsConstantOp.getValue().ceilLogBase2();
782 size_t width = op.getType().getIntOrFloatBitWidth();
783 Value lowerBits = rewriter.createOrFold<comb::ExtractOp>(
784 op.getLoc(), adaptor.getLhs(), 0, extractAmount);
785 Value constZero = hw::ConstantOp::create(
786 rewriter, op.getLoc(), APInt::getZero(width - extractAmount));
787 replaceOpWithNewOpAndCopyNamehint<comb::ConcatOp>(
788 rewriter, op, op.getType(), ArrayRef<Value>{constZero, lowerBits});
789 return success();
790 }
791
792 // When rhs is not power of two and the number of unknown bits are small,
793 // create a mux tree that emulates all possible cases.
795 rewriter, maxEmulationUnknownBits, op,
796 [](const APInt &lhs, const APInt &rhs) {
797 // Division by zero is undefined, just return zero.
798 if (rhs.isZero())
799 return APInt::getZero(rhs.getBitWidth());
800 return lhs.urem(rhs);
801 });
802 }
803};
804
805struct CombDivSOpConversion : DivModOpConversionBase<DivSOp> {
806 using DivModOpConversionBase<DivSOp>::DivModOpConversionBase;
807
808 LogicalResult
809 matchAndRewrite(DivSOp op, OpAdaptor adaptor,
810 ConversionPatternRewriter &rewriter) const override {
811 // Currently only lower with emulation.
812 // TODO: Implement a signed division lowering at least for power of two.
814 rewriter, maxEmulationUnknownBits, op,
815 [](const APInt &lhs, const APInt &rhs) {
816 // Division by zero is undefined, just return zero.
817 if (rhs.isZero())
818 return APInt::getZero(rhs.getBitWidth());
819 return lhs.sdiv(rhs);
820 });
821 }
822};
823
824struct CombModSOpConversion : DivModOpConversionBase<ModSOp> {
825 using DivModOpConversionBase<ModSOp>::DivModOpConversionBase;
826 LogicalResult
827 matchAndRewrite(ModSOp op, OpAdaptor adaptor,
828 ConversionPatternRewriter &rewriter) const override {
829 // Currently only lower with emulation.
830 // TODO: Implement a signed modulus lowering at least for power of two.
832 rewriter, maxEmulationUnknownBits, op,
833 [](const APInt &lhs, const APInt &rhs) {
834 // Division by zero is undefined, just return zero.
835 if (rhs.isZero())
836 return APInt::getZero(rhs.getBitWidth());
837 return lhs.srem(rhs);
838 });
839 }
840};
841
/// Lower `comb.icmp` to AIG operations.
///
/// Equality lowers to a reduction AND of per-bit XNORs, inequality to a
/// reduction OR of per-bit XORs. Ordered unsigned comparisons build a
/// ripple comparator over the bits; signed comparisons reuse the unsigned
/// comparator on the magnitude bits plus explicit sign-bit handling.
struct CombICmpOpConversion : OpConversionPattern<ICmpOp> {
  /// Build an unsigned comparison of `aBits` vs `bBits` (LSB-first, equal
  /// length). `isLess` selects the </<= family vs >/>=; `includeEq`
  /// selects the inclusive variant. Returns an i1 result value.
  static Value constructUnsignedCompare(ICmpOp op, ArrayRef<Value> aBits,
                                        ArrayRef<Value> bBits, bool isLess,
                                        bool includeEq,
                                        ConversionPatternRewriter &rewriter) {
    // Construct following unsigned comparison expressions.
    // a <= b ==> (~a[n] & b[n]) | (a[n] == b[n] & a[n-1:0] <= b[n-1:0])
    // a < b ==> (~a[n] & b[n]) | (a[n] == b[n] & a[n-1:0] < b[n-1:0])
    // a >= b ==> ( a[n] & ~b[n]) | (a[n] == b[n] & a[n-1:0] >= b[n-1:0])
    // a > b ==> ( a[n] & ~b[n]) | (a[n] == b[n] & a[n-1:0] > b[n-1:0])
    // Base case (no bits left): equal operands, so the result is exactly
    // `includeEq`.
    Value acc =
        hw::ConstantOp::create(rewriter, op.getLoc(), op.getType(), includeEq);

    // Unroll the recurrence bottom-up, from LSB to MSB.
    for (auto [aBit, bBit] : llvm::zip(aBits, bBits)) {
      auto aBitXorBBit =
          rewriter.createOrFold<comb::XorOp>(op.getLoc(), aBit, bBit, true);
      // a[i] == b[i]  <=>  ~(a[i] ^ b[i]); single-operand inverted AND is NOT.
      auto aEqualB = rewriter.createOrFold<aig::AndInverterOp>(
          op.getLoc(), aBitXorBBit, true);
      // pred = isLess ? (~a[i] & b[i]) : (a[i] & ~b[i]).
      auto pred = rewriter.createOrFold<aig::AndInverterOp>(
          op.getLoc(), aBit, bBit, isLess, !isLess);

      // acc = pred | (a[i] == b[i] & acc-so-far).
      auto aBitAndBBit = rewriter.createOrFold<comb::AndOp>(
          op.getLoc(), ValueRange{aEqualB, acc}, true);
      acc = rewriter.createOrFold<comb::OrOp>(op.getLoc(), pred, aBitAndBBit,
                                              true);
    }
    return acc;
  }

  LogicalResult
  matchAndRewrite(ICmpOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto lhs = adaptor.getLhs();
    auto rhs = adaptor.getRhs();

    switch (op.getPredicate()) {
    default:
      // Unsupported predicates are left for other patterns / legalization.
      return failure();

    case ICmpPredicate::eq:
    case ICmpPredicate::ceq: {
      // a == b ==> ~(a[n] ^ b[n]) & ~(a[n-1] ^ b[n-1]) & ...
      auto xorOp = rewriter.createOrFold<comb::XorOp>(op.getLoc(), lhs, rhs);
      auto xorBits = extractBits(rewriter, xorOp);
      // Invert every operand of the AND, yielding a reduction of XNORs.
      SmallVector<bool> allInverts(xorBits.size(), true);
      replaceOpWithNewOpAndCopyNamehint<aig::AndInverterOp>(
          rewriter, op, xorBits, allInverts);
      return success();
    }

    case ICmpPredicate::ne:
    case ICmpPredicate::cne: {
      // a != b ==> (a[n] ^ b[n]) | (a[n-1] ^ b[n-1]) | ...
      auto xorOp = rewriter.createOrFold<comb::XorOp>(op.getLoc(), lhs, rhs);
      replaceOpWithNewOpAndCopyNamehint<comb::OrOp>(
          rewriter, op, extractBits(rewriter, xorOp), true);
      return success();
    }

    case ICmpPredicate::uge:
    case ICmpPredicate::ugt:
    case ICmpPredicate::ule:
    case ICmpPredicate::ult: {
      bool isLess = op.getPredicate() == ICmpPredicate::ult ||
                    op.getPredicate() == ICmpPredicate::ule;
      bool includeEq = op.getPredicate() == ICmpPredicate::uge ||
                       op.getPredicate() == ICmpPredicate::ule;
      auto aBits = extractBits(rewriter, lhs);
      auto bBits = extractBits(rewriter, rhs);
      replaceOpAndCopyNamehint(rewriter, op,
                               constructUnsignedCompare(op, aBits, bBits,
                                                        isLess, includeEq,
                                                        rewriter));
      return success();
    }
    case ICmpPredicate::slt:
    case ICmpPredicate::sle:
    case ICmpPredicate::sgt:
    case ICmpPredicate::sge: {
      // A signed comparison needs at least the sign bit to exist.
      if (lhs.getType().getIntOrFloatBitWidth() == 0)
        return rewriter.notifyMatchFailure(
            op.getLoc(), "i0 signed comparison is unsupported");
      bool isLess = op.getPredicate() == ICmpPredicate::slt ||
                    op.getPredicate() == ICmpPredicate::sle;
      bool includeEq = op.getPredicate() == ICmpPredicate::sge ||
                       op.getPredicate() == ICmpPredicate::sle;

      auto aBits = extractBits(rewriter, lhs);
      auto bBits = extractBits(rewriter, rhs);

      // Get a sign bit (bits are LSB-first, so the sign is the last one).
      auto signA = aBits.back();
      auto signB = bBits.back();

      // Compare magnitudes (all bits except sign). When the signs agree,
      // two's-complement ordering matches the unsigned ordering of the
      // remaining bits.
      auto sameSignResult = constructUnsignedCompare(
          op, ArrayRef(aBits).drop_back(), ArrayRef(bBits).drop_back(), isLess,
          includeEq, rewriter);

      // XOR of signs: true if signs are different
      auto signsDiffer =
          comb::XorOp::create(rewriter, op.getLoc(), signA, signB);

      // Result when signs are different: a < b iff a is negative (signA),
      // a > b iff b is negative (signB).
      Value diffSignResult = isLess ? signA : signB;

      // Final result: choose based on whether signs differ
      replaceOpWithNewOpAndCopyNamehint<comb::MuxOp>(
          rewriter, op, signsDiffer, diffSignResult, sameSignResult);
      return success();
    }
    }
  }
};
957
958struct CombParityOpConversion : OpConversionPattern<ParityOp> {
960
961 LogicalResult
962 matchAndRewrite(ParityOp op, OpAdaptor adaptor,
963 ConversionPatternRewriter &rewriter) const override {
964 // Parity is the XOR of all bits.
965 replaceOpWithNewOpAndCopyNamehint<comb::XorOp>(
966 rewriter, op, extractBits(rewriter, adaptor.getInput()), true);
967 return success();
968 }
969};
970
/// Lower `comb.shl` (variable left shift) to a mux tree built by
/// `createShiftLogic`.
struct CombShlOpConversion : OpConversionPattern<comb::ShlOp> {

  LogicalResult
  matchAndRewrite(comb::ShlOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto width = op.getType().getIntOrFloatBitWidth();
    auto lhs = adaptor.getLhs();
    // For a left shift by `index`, the result is
    // {lhs[width-index-1:0], index'b0} — low bits of the input moved up,
    // zeros shifted in from the right.
    auto result = createShiftLogic</*isLeftShift=*/true>(
        rewriter, op.getLoc(), adaptor.getRhs(), width,
        /*getPadding=*/
        [&](int64_t index) {
          // Don't create zero width value.
          if (index == 0)
            return Value();
          // Padding is 0 for left shift.
          return rewriter.createOrFold<hw::ConstantOp>(
              op.getLoc(), rewriter.getIntegerType(index), 0);
        },
        /*getExtract=*/
        [&](int64_t index) {
          assert(index < width && "index out of bounds");
          // Extract the bits from LSB.
          return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, 0,
                                                        width - index);
        });

    replaceOpAndCopyNamehint(rewriter, op, result);
    return success();
  }
};
1002
/// Lower `comb.shru` (variable logical right shift) to a mux tree built by
/// `createShiftLogic`.
struct CombShrUOpConversion : OpConversionPattern<comb::ShrUOp> {

  LogicalResult
  matchAndRewrite(comb::ShrUOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto width = op.getType().getIntOrFloatBitWidth();
    auto lhs = adaptor.getLhs();
    // For a right shift by `index`, the result is
    // {index'b0, lhs[width-1:index]} — high bits of the input moved down,
    // zeros shifted in from the left.
    auto result = createShiftLogic</*isLeftShift=*/false>(
        rewriter, op.getLoc(), adaptor.getRhs(), width,
        /*getPadding=*/
        [&](int64_t index) {
          // Don't create zero width value.
          if (index == 0)
            return Value();
          // Padding is 0 for right shift.
          return rewriter.createOrFold<hw::ConstantOp>(
              op.getLoc(), rewriter.getIntegerType(index), 0);
        },
        /*getExtract=*/
        [&](int64_t index) {
          assert(index < width && "index out of bounds");
          // Extract the bits from MSB.
          return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, index,
                                                        width - index);
        });

    replaceOpAndCopyNamehint(rewriter, op, result);
    return success();
  }
};
1034
/// Lower `comb.shrs` (variable arithmetic right shift) to a mux tree that
/// replicates the sign bit into the vacated positions.
struct CombShrSOpConversion : OpConversionPattern<comb::ShrSOp> {

  LogicalResult
  matchAndRewrite(comb::ShrSOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto width = op.getType().getIntOrFloatBitWidth();
    if (width == 0)
      return rewriter.notifyMatchFailure(op.getLoc(),
                                         "i0 signed shift is unsupported");
    auto lhs = adaptor.getLhs();
    // Get the sign bit.
    auto sign =
        rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, width - 1, 1);

    // NOTE: The max shift amount is width - 1 because the sign bit is
    // already shifted out.
    auto result = createShiftLogic</*isLeftShift=*/false>(
        rewriter, op.getLoc(), adaptor.getRhs(), width - 1,
        /*getPadding=*/
        [&](int64_t index) {
          // Shifting by `index` vacates index + 1 high bits (the sign bit
          // plus `index` positions); fill them all with copies of the sign.
          return rewriter.createOrFold<comb::ReplicateOp>(op.getLoc(), sign,
                                                          index + 1);
        },
        /*getExtract=*/
        [&](int64_t index) {
          // Surviving bits: lhs[width-2:index] (the sign bit is handled by
          // the padding above).
          return rewriter.createOrFold<comb::ExtractOp>(op.getLoc(), lhs, index,
                                                        width - index - 1);
        });

    replaceOpAndCopyNamehint(rewriter, op, result);
    return success();
  }
};
1069
1070} // namespace
1071
1072//===----------------------------------------------------------------------===//
1073// Convert Comb to AIG pass
1074//===----------------------------------------------------------------------===//
1075
namespace {
/// Pass driver that lowers the Comb dialect into the AIG dialect using the
/// conversion patterns defined above.
struct ConvertCombToAIGPass
    : public impl::ConvertCombToAIGBase<ConvertCombToAIGPass> {
  void runOnOperation() override;
  using ConvertCombToAIGBase<ConvertCombToAIGPass>::ConvertCombToAIGBase;
  // Pass options declared in the tablegen-generated base class.
  using ConvertCombToAIGBase<ConvertCombToAIGPass>::additionalLegalOps;
  using ConvertCombToAIGBase<ConvertCombToAIGPass>::maxEmulationUnknownBits;
};
} // namespace
1085
/// Populate `patterns` with all Comb-to-AIG lowering patterns.
/// `maxEmulationUnknownBits` bounds the unknown-bit emulation threshold used
/// by the division/modulo patterns.
static void
populateCombToAIGConversionPatterns(RewritePatternSet &patterns,
                                    uint32_t maxEmulationUnknownBits) {
  patterns.add<
      // Bitwise Logical Ops
      CombAndOpConversion, CombOrOpConversion, CombXorOpConversion,
      CombMuxOpConversion, CombParityOpConversion,
      // Arithmetic Ops
      CombAddOpConversion, CombSubOpConversion, CombMulOpConversion,
      CombICmpOpConversion,
      // Shift Ops
      CombShlOpConversion, CombShrUOpConversion, CombShrSOpConversion,
      // Variadic ops that must be lowered to binary operations
      CombLowerVariadicOp<XorOp>, CombLowerVariadicOp<AddOp>,
      CombLowerVariadicOp<MulOp>>(patterns.getContext());

  // Add div/mod patterns with a threshold given by the pass option.
  patterns.add<CombDivUOpConversion, CombModUOpConversion, CombDivSOpConversion,
               CombModSOpConversion>(patterns.getContext(),
                                     maxEmulationUnknownBits);
}
1107
/// Run the dialect conversion: mark Comb illegal, AIG legal, and apply the
/// lowering patterns with partial conversion.
void ConvertCombToAIGPass::runOnOperation() {
  ConversionTarget target(getContext());

  // Comb is source dialect.
  target.addIllegalDialect<comb::CombDialect>();
  // Keep data movement operations like Extract, Concat and Replicate.
  // NOTE(review): the continuation of this op list was lost in this rendering
  // of the file; `hw::BitcastOp, hw::ConstantOp` is reconstructed (the
  // patterns above create hw::ConstantOp, so it must be legal) — confirm
  // against the upstream source.
  target.addLegalOp<comb::ExtractOp, comb::ConcatOp, comb::ReplicateOp,
                    hw::BitcastOp, hw::ConstantOp>();

  // Treat array operations as illegal. Strictly speaking, other than array
  // get operation with non-const index are legal in AIG but array types
  // prevent a bunch of optimizations so just lower them to integer
  // operations. It's required to run HWAggregateToComb pass before this pass.
  // NOTE(review): the first line of this op list was lost in this rendering;
  // the hw array ops are reconstructed — confirm against the upstream source.
  target.addIllegalOp<hw::ArrayGetOp, hw::ArrayCreateOp, hw::ArrayConcatOp,
                      hw::AggregateConstantOp>();

  // AIG is target dialect.
  target.addLegalDialect<aig::AIGDialect>();

  // If additional legal ops are specified, add them to the target.
  if (!additionalLegalOps.empty())
    for (const auto &opName : additionalLegalOps)
      target.addLegalOp(OperationName(opName, &getContext()));

  RewritePatternSet patterns(&getContext());
  populateCombToAIGConversionPatterns(patterns, maxEmulationUnknownBits);

  // Partial conversion: ops not matched by any pattern (or explicitly made
  // legal above) are left untouched.
  if (failed(mlir::applyPartialConversion(getOperation(), target,
                                          std::move(patterns))))
    return signalPassFailure();
}
assert(baseType &&"element must be base type")
static SmallVector< T > concat(const SmallVectorImpl< T > &a, const SmallVectorImpl< T > &b)
Returns a new vector containing the concatenation of vectors a and b.
Definition CalyxOps.cpp:540
static SmallVector< Value > extractBits(OpBuilder &builder, Value val)
Definition CombToAIG.cpp:38
static Value createShiftLogic(ConversionPatternRewriter &rewriter, Location loc, Value shiftAmount, int64_t maxShiftAmount, llvm::function_ref< Value(int64_t)> getPadding, llvm::function_ref< Value(int64_t)> getExtract)
Definition CombToAIG.cpp:52
static APInt substitueMaskToValues(size_t width, llvm::SmallVectorImpl< ConstantOrValue > &constantOrValues, uint32_t mask)
static LogicalResult emulateBinaryOpForUnknownBits(ConversionPatternRewriter &rewriter, int64_t maxEmulationUnknownBits, Operation *op, llvm::function_ref< APInt(const APInt &, const APInt &)> emulate)
static int64_t getNumUnknownBitsAndPopulateValues(Value value, llvm::SmallVectorImpl< ConstantOrValue > &values)
static void populateCombToAIGConversionPatterns(RewritePatternSet &patterns, uint32_t maxEmulationUnknownBits)
static std::optional< APSInt > getConstant(Attribute operand)
Determine the value of a constant operand for the sake of constant folding.
static Value lowerFullyAssociativeOp(Operation &op, OperandRange operands, SmallVector< Operation * > &newOps)
Lower a variadic fully-associative operation into an expression tree.
create(data_type, value)
Definition hw.py:441
create(data_type, value)
Definition hw.py:433
The InstanceGraph op interface, see InstanceGraphInterface.td for more details.
void replaceOpAndCopyNamehint(PatternRewriter &rewriter, Operation *op, Value newValue)
A wrapper of PatternRewriter::replaceOp to propagate "sv.namehint" attribute.
Definition Naming.cpp:73
Definition comb.py:1