15#include "mlir/Analysis/TopologicalSortUtils.h"
16#include "mlir/IR/BuiltinAttributes.h"
17#include "mlir/IR/Matchers.h"
18#include "mlir/IR/OpDefinition.h"
19#include "mlir/IR/PatternMatch.h"
20#include "mlir/IR/Value.h"
21#include "llvm/ADT/APInt.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/Support/Casting.h"
24#include "llvm/Support/LogicalResult.h"
29using namespace circt::synth::aig;
32#include "circt/Dialect/Synth/Synth.cpp.inc"
38inline APInt applyInversion(APInt value,
bool inverted) {
44inline llvm::KnownBits applyInversion(llvm::KnownBits value,
bool inverted) {
46 std::swap(value.Zero, value.One);
52LogicalResult ChoiceOp::verify() {
53 if (getNumOperands() < 1)
54 return emitOpError(
"requires at least one operand");
58OpFoldResult ChoiceOp::fold(FoldAdaptor adaptor) {
59 if (adaptor.getInputs().size() == 1)
// Flatten clusters of `synth.choice` ops: collect choices reachable through
// operands and users within the same block, merge their leaf (non-choice)
// inputs, and replace the whole cluster with a single new choice op.
// NOTE(review): interior lines of this definition are missing from this
// chunk (e.g. the declaration of `visitedChoices` and several closing
// braces); comments below describe only the visible code.
73LogicalResult ChoiceOp::canonicalize(ChoiceOp op, PatternRewriter &rewriter) {
74  llvm::SetVector<Value> worklist;
// Enqueue a choice's inputs if it is in the same block as `op` and not yet
// visited; returns true when the choice was newly merged into the cluster.
77  auto addToWorklist = [&](ChoiceOp choice) ->
bool {
78    if (choice->getBlock() == op->getBlock() && visitedChoices.insert(choice)) {
79      worklist.insert(choice.getInputs().begin(), choice.getInputs().end());
// Tracks whether any other choice op was folded into this one.
87  bool mergedOtherChoices =
false;
// Fixed-point walk: `worklist` grows while iterating, so use an index —
// SetVector iterators would be invalidated by the insertions.
90  for (
unsigned i = 0; i < worklist.size(); ++i) {
91    Value val = worklist[i];
// Inputs that are themselves choices are expanded in place.
92    if (
auto defOp = val.getDefiningOp<synth::ChoiceOp>()) {
94      if (addToWorklist(defOp))
95        mergedOtherChoices =
true;
// Also absorb choice ops that consume this value.
98    for (Operation *user : val.getUsers()) {
99      if (
auto userChoice = llvm::dyn_cast<synth::ChoiceOp>(user)) {
100        if (addToWorklist(userChoice)) {
101          mergedOtherChoices =
true;
// Keep only leaf values: worklist entries not produced by a visited choice.
107  llvm::SmallVector<mlir::Value> finalOperands;
108  for (Value v : worklist) {
109    if (!visitedChoices.contains(v.getDefiningOp())) {
110      finalOperands.push_back(v);
// Nothing was simplified — signal failure so the rewrite driver terminates.
114  if (!mergedOtherChoices && finalOperands.size() == op.getInputs().size())
115    return llvm::failure();
// Build the flattened choice (operand list argument is on a missing line).
117  auto newChoice = synth::ChoiceOp::create(rewriter, op->getLoc(), op.getType(),
// Replace every merged choice op with the flattened one.
119  for (Operation *visited : visitedChoices.takeVector())
120    rewriter.replaceOp(visited, newChoice);
// Redirect remaining uses of each leaf value to the new choice's result,
// excluding the new choice itself (which must keep them as operands).
122  for (
auto value : newChoice.getInputs())
123    rewriter.replaceAllUsesExcept(value, newChoice.getResult(), newChoice);
132bool AndInverterOp::areInputsPermutationInvariant() {
return true; }
// Folder for and-inverter ops.
// NOTE(review): several interior lines are missing from this chunk; both
// `return`s below sit under conditionals whose guards are not fully visible.
134OpFoldResult AndInverterOp::fold(FoldAdaptor adaptor) {
// A single non-inverted operand passes straight through.
135  if (getNumOperands() == 1 && !isInverted(0))
136    return getOperand(0);
138  auto inputs = adaptor.getInputs();
// Two-operand case with a constant rhs: fold against the constant.
139  if (inputs.size() == 2)
140    if (
auto intAttr = dyn_cast_or_null<IntegerAttr>(inputs[1])) {
141      auto value = intAttr.getValue();
// Presumably reached only when the whole result is constant (guard lines
// 142-144 are missing — TODO confirm): materialize it as an IntegerAttr.
145      return IntegerAttr::get(
146          IntegerType::get(getContext(), value.getBitWidth()), value);
// x & all-ones == x; the inversion-flag checks guarding this return are on
// missing lines (148-150).
147      if (value.isAllOnes()) {
151        return getOperand(0);
// Canonicalize an and-inverter: deduplicate repeated operands (x & x == x),
// detect complementary pairs (x & ~x == 0), look through unary inverters,
// and fold all constant operands into one mask.
// NOTE(review): many interior lines of this definition are missing from
// this chunk (e.g. the `seen` map and `constOp` declarations, several
// closing braces); comments describe only the visible code.
157LogicalResult AndInverterOp::canonicalize(AndInverterOp op,
158                                          PatternRewriter &rewriter) {
160  SmallVector<Value> uniqueValues;
161  SmallVector<bool> uniqueInverts;
// Running constant mask; starts at all-ones, the AND identity (declaration
// line of the variable is missing).
164      APInt::getAllOnes(op.getResult().getType().getIntOrFloatBitWidth());
166  bool invertedConstFound =
false;
167  bool flippedFound =
false;
169  for (
auto [value, inverted] :
llvm::zip(op.getInputs(), op.getInverted())) {
170    bool newInverted = inverted;
// Constant operand: AND its (possibly complemented) value into the mask.
173      constValue &= ~constOp.getValue();
174      invertedConstFound =
true;
176      constValue &= constOp.getValue();
// Look through single-operand inverters: and_inv(not x, ...) becomes x with
// the inversion flag flipped.
181    if (
auto andInverterOp = value.getDefiningOp<synth::aig::AndInverterOp>()) {
182      if (andInverterOp.getInputs().size() == 1 &&
183          andInverterOp.isInverted(0)) {
184        value = andInverterOp.getOperand(0);
185        newInverted = andInverterOp.isInverted(0) ^ inverted;
// Deduplicate: keep the first occurrence; a repeat with the opposite
// inversion flag means x & ~x, i.e. the whole op folds to zero.
190    auto it = seen.find(value);
191    if (it == seen.end()) {
192      seen.insert({value, newInverted});
193      uniqueValues.push_back(value);
194      uniqueInverts.push_back(newInverted);
195    }
else if (it->second != newInverted) {
198          op, APInt::getZero(value.getType().getIntOrFloatBitWidth()));
// A zero mask dominates the AND (replacement code is on missing lines).
204  if (constValue.isZero()) {
// Bail out when nothing was simplified so the rewrite driver terminates.
210  if ((uniqueValues.size() == op.getInputs().size() && !flippedFound) ||
211      (!constValue.isAllOnes() && !invertedConstFound &&
212       uniqueValues.size() + 1 == op.getInputs().size()))
// A non-trivial mask remains: keep it as one non-inverted constant operand.
215  if (!constValue.isAllOnes()) {
217    uniqueInverts.push_back(
false);
218    uniqueValues.push_back(constOp);
222  if (uniqueValues.empty()) {
// Rebuild the op from the simplified operand list, preserving namehints.
228  replaceOpWithNewOpAndCopyNamehint<synth::aig::AndInverterOp>(
229      rewriter, op, uniqueValues, uniqueInverts);
233APInt AndInverterOp::evaluateBooleanLogic(
234 llvm::function_ref<
const APInt &(
unsigned)> getInputValue) {
235 assert(getNumOperands() > 0 &&
"Expected non-empty input list");
236 APInt result = APInt::getAllOnes(getInputValue(0).
getBitWidth());
237 for (
auto [idx, inverted] :
llvm::enumerate(getInverted())) {
238 const APInt &input = getInputValue(idx);
240 result &= applyInversion(input, inverted);
245llvm::KnownBits AndInverterOp::computeKnownBits(
246 llvm::function_ref<
const llvm::KnownBits &(
unsigned)> getInputKnownBits) {
247 assert(getNumOperands() > 0 &&
"Expected non-empty input list");
249 auto width = getInputKnownBits(0).getBitWidth();
250 llvm::KnownBits result(width);
251 result.One = APInt::getAllOnes(width);
252 result.Zero = APInt::getZero(width);
254 for (
auto [i, inverted] :
llvm::enumerate(getInverted()))
255 result &= applyInversion(getInputKnownBits(i), inverted);
260int64_t AndInverterOp::getLogicDepthCost() {
261 return llvm::Log2_64_Ceil(getNumOperands());
264std::optional<uint64_t> AndInverterOp::getLogicAreaCost() {
265 int64_t bitWidth = hw::getBitWidth(getType());
268 return static_cast<uint64_t
>(getNumOperands() - 1) * bitWidth;
271void AndInverterOp::emitCNFWithoutInversion(
272 int outVar, llvm::ArrayRef<int> inputVars,
273 llvm::function_ref<
void(llvm::ArrayRef<int>)> addClause,
274 llvm::function_ref<
int()> newVar) {
283bool XorInverterOp::areInputsPermutationInvariant() {
return true; }
285APInt XorInverterOp::evaluateBooleanLogic(
286 llvm::function_ref<
const APInt &(
unsigned)> getInputValue) {
287 assert(getNumOperands() > 0 &&
"Expected non-empty input list");
288 APInt result = APInt::getZero(getInputValue(0).
getBitWidth());
289 for (
auto [idx, inverted] :
llvm::enumerate(getInverted()))
290 result ^= applyInversion(getInputValue(idx), inverted);
294llvm::KnownBits XorInverterOp::computeKnownBits(
295 llvm::function_ref<
const llvm::KnownBits &(
unsigned)> getInputKnownBits) {
296 assert(getNumOperands() > 0 &&
"Expected non-empty input list");
298 llvm::KnownBits result(getInputKnownBits(0).
getBitWidth());
299 for (
auto [i, inverted] :
llvm::enumerate(getInverted()))
300 result ^= applyInversion(getInputKnownBits(i), inverted);
304int64_t XorInverterOp::getLogicDepthCost() {
305 return llvm::Log2_64_Ceil(getNumOperands());
308std::optional<uint64_t> XorInverterOp::getLogicAreaCost() {
309 int64_t bitWidth = hw::getBitWidth(getType());
312 return static_cast<uint64_t
>(getNumOperands() - 1) * bitWidth;
315void XorInverterOp::emitCNFWithoutInversion(
316 int outVar, llvm::ArrayRef<int> inputVars,
317 llvm::function_ref<
void(llvm::ArrayRef<int>)> addClause,
318 llvm::function_ref<
int()> newVar) {
323 Location loc, ValueRange operands, ArrayRef<bool> inverts,
324 PatternRewriter &rewriter,
325 llvm::function_ref<Value(Value,
bool)> createUnary,
326 llvm::function_ref<Value(Value, Value,
bool,
bool)> createBinary) {
327 switch (operands.size()) {
329 assert(0 &&
"cannot be called with empty operand range");
332 return inverts[0] ? createUnary(operands[0],
true) : operands[0];
334 return createBinary(operands[0], operands[1], inverts[0], inverts[1]);
336 auto firstHalf = operands.size() / 2;
338 inverts.take_front(firstHalf),
339 rewriter, createUnary, createBinary);
341 inverts.drop_front(firstHalf),
342 rewriter, createUnary, createBinary);
343 return createBinary(lhs, rhs,
false,
false);
348template <
typename OpTy>
350 PatternRewriter &rewriter) {
351 if (op.getInputs().size() <= 2)
354 op.getLoc(), op.getOperands(), op.getInverted(), rewriter,
355 [&](Value input,
bool invert) {
356 return OpTy::create(rewriter, op.getLoc(), input, invert);
358 [&](Value lhs, Value rhs,
bool invertLhs,
bool invertRhs) {
359 return OpTy::create(rewriter, op.getLoc(), lhs, rhs, invertLhs,
// Registers the variadic-to-binary-tree lowering for aig.and_inv ops.
// NOTE(review): the enclosing function signature is on lines missing from
// this chunk (declared as populateVariadicAndInverterLoweringPatterns).
368  patterns.add(lowerVariadicAndInverterOpConversion<aig::AndInverterOp>);
// Registers the variadic-to-binary-tree lowering for xor-inverter ops.
// NOTE(review): the enclosing function signature is on lines missing from
// this chunk (declared as populateVariadicXorInverterLoweringPatterns).
373  patterns.add(lowerVariadicAndInverterOpConversion<XorInverterOp>);
// Topologically sort the operations in every block of graph regions
// (regions without SSA dominance) beneath `op`. `isOperandReady` decides
// when an operand may be considered available during the sort. Returns
// failure if any block cannot be fully sorted (i.e. contains a cycle).
// NOTE(review): the signature line and a few interior lines are missing
// from this chunk.
378    llvm::function_ref<
bool(mlir::Value, mlir::Operation *)> isOperandReady) {
380  auto walkResult = op->walk([&](Region *region) {
// Regions with SSA dominance are already correctly ordered; skip them.
382        dyn_cast<mlir::RegionKindInterface>(region->getParentOp());
384        regionKindOp.hasSSADominance(region->getRegionNumber()))
385      return WalkResult::advance();
388    for (
auto &block : *region) {
// sortTopologically returns false when a cycle prevents a complete sort.
389      if (!mlir::sortTopologically(&block, isOperandReady))
390        return WalkResult::interrupt();
392    return WalkResult::advance();
395  return success(!walkResult.wasInterrupted());
assert(baseType && "element must be base type");
LogicalResult lowerVariadicAndInverterOpConversion(OpTy op, PatternRewriter &rewriter)
static Value lowerVariadicInvertibleOp(Location loc, ValueRange operands, ArrayRef< bool > inverts, PatternRewriter &rewriter, llvm::function_ref< Value(Value, bool)> createUnary, llvm::function_ref< Value(Value, Value, bool, bool)> createBinary)
int64_t getBitWidth(mlir::Type type)
Return the hardware bit width of a type.
void populateVariadicXorInverterLoweringPatterns(mlir::RewritePatternSet &patterns)
LogicalResult topologicallySortGraphRegionBlocks(mlir::Operation *op, llvm::function_ref< bool(mlir::Value, mlir::Operation *)> isOperandReady)
This function performs a topological sort on the operations within each block of graph regions in the...
void populateVariadicAndInverterLoweringPatterns(mlir::RewritePatternSet &patterns)
The InstanceGraph op interface, see InstanceGraphInterface.td for more details.
void addAndClauses(int outVar, llvm::ArrayRef< int > inputLits, llvm::function_ref< void(llvm::ArrayRef< int >)> addClause)
Emit clauses encoding outVar <=> and(inputLits).
void replaceOpAndCopyNamehint(PatternRewriter &rewriter, Operation *op, Value newValue)
A wrapper of PatternRewriter::replaceOp to propagate "sv.namehint" attribute.
void addParityClauses(int outVar, llvm::ArrayRef< int > inputLits, llvm::function_ref< void(llvm::ArrayRef< int >)> addClause, llvm::function_ref< int()> newVar)
Emit clauses encoding outVar <=> parity(inputLits).