DatapathToComb.cpp
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "circt/Conversion/DatapathToComb.h"
#include "circt/Dialect/Comb/CombOps.h"
#include "circt/Dialect/Datapath/DatapathOps.h"
#include "circt/Dialect/HW/HWOps.h"
#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>

#define DEBUG_TYPE "datapath-to-comb"

namespace circt {
#define GEN_PASS_DEF_CONVERTDATAPATHTOCOMB
#include "circt/Conversion/Passes.h.inc"
} // namespace circt

using namespace circt;
using namespace datapath;

// A wrapper for comb::extractBits that returns the bits of `val` as a
// SmallVector<Value>, least-significant bit first.
static SmallVector<Value> extractBits(OpBuilder &builder, Value val) {
  SmallVector<Value> bits;
  comb::extractBits(builder, val, bits);
  return bits;
}

//===----------------------------------------------------------------------===//
// Conversion patterns
//===----------------------------------------------------------------------===//

namespace {
// Replace a compressor with an adder of the inputs and zeros for the other
// results:
//   compress(a,b,c,d) -> {0, a+b+c+d}
// This facilitates the use of downstream compression algorithms, e.g. Yosys.
struct DatapathCompressOpAddConversion : mlir::OpRewritePattern<CompressOp> {
  using mlir::OpRewritePattern<CompressOp>::OpRewritePattern;
  LogicalResult
  matchAndRewrite(CompressOp op,
                  mlir::PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    auto inputs = op.getOperands();
    unsigned width = inputs[0].getType().getIntOrFloatBitWidth();
    // Sum all the inputs - the sum becomes the last result.
    auto addOp = comb::AddOp::create(rewriter, loc, inputs, true);
    // Replace the remaining results with zeros.
    auto zeroOp = hw::ConstantOp::create(rewriter, loc, APInt(width, 0));
    SmallVector<Value> results(op.getNumResults() - 1, zeroOp);
    results.push_back(addOp);
    rewriter.replaceOp(op, results);
    return success();
  }
};
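
// Illustrative sketch of this pattern's effect (schematic IR, syntax
// simplified, names are placeholders): a four-addend compressor with two
// results,
//   %r0, %r1 = datapath.compress %a, %b, %c, %d
// is replaced by a single carry-propagate adder plus a zero row,
//   %sum = comb.add %a, %b, %c, %d
//   %r0 -> hw.constant 0, %r1 -> %sum
// so downstream tools see one ordinary addition they can re-compress.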

// Replace a compressor with a Wallace tree of full adders.
struct DatapathCompressOpConversion : mlir::OpRewritePattern<CompressOp> {
  DatapathCompressOpConversion(MLIRContext *context,
                               synth::IncrementalLongestPathAnalysis *analysis)
      : mlir::OpRewritePattern<CompressOp>(context), analysis(analysis) {}

  LogicalResult
  matchAndRewrite(CompressOp op,
                  mlir::PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    auto inputs = op.getOperands();

    SmallVector<SmallVector<Value>> addends;
    for (auto input : inputs) {
      addends.push_back(
          extractBits(rewriter, input)); // Extract bits from each input.
    }

    // Compressor tree reduction.
    auto width = inputs[0].getType().getIntOrFloatBitWidth();
    auto targetAddends = op.getNumResults();
    datapath::CompressorTree comp(width, addends, loc);

    if (analysis) {
      // Update delay information with arrival times.
      if (failed(comp.withInputDelays(
              [&](Value v) { return analysis->getMaxDelay(v, 0); })))
        return failure();
    }

    rewriter.replaceOp(op, comp.compressToHeight(rewriter, targetAddends));
    return success();
  }

private:
  synth::IncrementalLongestPathAnalysis *analysis = nullptr;
};
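
// Sketch of the 3:2 compression step underlying the tree above: a full adder
// reduces three addend rows a, b, c to two rows with no carry propagation,
//   save[i]      = a[i] ^ b[i] ^ c[i]
//   carry[i + 1] = (a[i] & b[i]) | (a[i] & c[i]) | (b[i] & c[i])
// so a + b + c == save + carry. Conceptually, repeating this until only
// `targetAddends` rows remain yields the Wallace tree.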

struct DatapathPartialProductOpConversion : OpRewritePattern<PartialProductOp> {
  using OpRewritePattern<PartialProductOp>::OpRewritePattern;

  DatapathPartialProductOpConversion(MLIRContext *context, bool forceBooth)
      : OpRewritePattern<PartialProductOp>(context), forceBooth(forceBooth) {}

  const bool forceBooth;

  LogicalResult matchAndRewrite(PartialProductOp op,
                                PatternRewriter &rewriter) const override {

    Value a = op.getLhs();
    Value b = op.getRhs();
    unsigned width = a.getType().getIntOrFloatBitWidth();

    // Skip zero-width values.
    if (width == 0) {
      rewriter.replaceOpWithNewOp<hw::ConstantOp>(op, op.getType(0), 0);
      return success();
    }

    // A square partial-product array can be reduced to an upper-triangular
    // array. For example, the AND array for a 4-bit squarer:
    //     0    0    0  a0a3 a0a2 a0a1 a0a0
    //     0    0  a1a3 a1a2 a1a1 a1a0   0
    //     0  a2a3 a2a2 a2a1 a2a0   0    0
    //   a3a3 a3a2 a3a1 a3a0   0    0    0
    //
    // can be reduced to:
    //     0    0  a0a3 a0a2 a0a1   0   a0
    //     0  a1a3 a1a2   0   a1    0    0
    //   a2a3   0   a2    0    0    0    0
    //    a3    0    0    0    0    0    0
    if (a == b)
      return lowerSqrAndArray(rewriter, a, op, width);

    // Use the number of result rows as a heuristic to choose the partial
    // product implementation: Booth encoding halves the array height, which
    // pays off for larger arrays.
    if (op.getNumResults() > 16 || forceBooth)
      return lowerBoothArray(rewriter, a, b, op, width);
    else
      return lowerAndArray(rewriter, a, b, op, width);
  }

private:
  static LogicalResult lowerAndArray(PatternRewriter &rewriter, Value a,
                                     Value b, PartialProductOp op,
                                     unsigned width) {

    Location loc = op.getLoc();
    // Keep a as a bitvector - multiply it by each digit of b.
    SmallVector<Value> bBits = extractBits(rewriter, b);

    SmallVector<Value> partialProducts;
    partialProducts.reserve(width);
    // AND array construction:
    //   partialProducts[i] = ({b[i],..., b[i]} & a) << i
    assert(op.getNumResults() <= width &&
           "Cannot return more results than the operator width");

    for (unsigned i = 0; i < op.getNumResults(); ++i) {
      auto repl =
          rewriter.createOrFold<comb::ReplicateOp>(loc, bBits[i], width);
      auto ppRow = rewriter.createOrFold<comb::AndOp>(loc, repl, a);
      if (i == 0) {
        partialProducts.push_back(ppRow);
        continue;
      }
      // Shift left by i by concatenating i zero bits, then truncate the
      // width+i bit result back to width bits.
      auto shiftBy = hw::ConstantOp::create(rewriter, loc, APInt(i, 0));
      auto ppAlign =
          comb::ConcatOp::create(rewriter, loc, ValueRange{ppRow, shiftBy});
      auto ppAlignTrunc =
          rewriter.createOrFold<comb::ExtractOp>(loc, ppAlign, 0, width);
      partialProducts.push_back(ppAlignTrunc);
    }

    rewriter.replaceOp(op, partialProducts);
    return success();
  }
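
  // Worked example (illustrative): width = 3 with all three rows requested
  // builds, shown MSB-to-LSB per row,
  //   partialProducts[0] = {b0&a2, b0&a1, b0&a0}
  //   partialProducts[1] = {b1&a1, b1&a0,     0}
  //   partialProducts[2] = {b2&a0,     0,     0}
  // and the rows sum to a * b modulo 2^3.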

  static LogicalResult lowerSqrAndArray(PatternRewriter &rewriter, Value a,
                                        PartialProductOp op, unsigned width) {

    Location loc = op.getLoc();
    SmallVector<Value> aBits = extractBits(rewriter, a);

    SmallVector<Value> partialProducts;
    partialProducts.reserve(width);
    // AND array construction, reduced to the upper triangle:
    //   partialProducts[i] = ({a[i],..., a[i]} & a) << i
    // optimised to: {a[i] & a[n-1], ..., a[i] & a[i+1], 0, a[i], 0, ..., 0}
    assert(op.getNumResults() <= width &&
           "Cannot return more results than the operator width");
    auto zeroFalse = hw::ConstantOp::create(rewriter, loc, APInt(1, 0));
    for (unsigned i = 0; i < op.getNumResults(); ++i) {
      SmallVector<Value> row;
      row.reserve(width);

      if (2 * i >= width) {
        // Pad the remaining rows with zeros.
        auto zeroWidth = hw::ConstantOp::create(rewriter, loc, APInt(width, 0));
        partialProducts.push_back(zeroWidth);
        continue;
      }

      if (i > 0) {
        auto shiftBy = hw::ConstantOp::create(rewriter, loc, APInt(2 * i, 0));
        row.push_back(shiftBy);
      }
      row.push_back(aBits[i]);

      // Track the width of the row as it is constructed.
      unsigned rowWidth = 2 * i + 1;
      if (rowWidth < width) {
        row.push_back(zeroFalse);
        ++rowWidth;
      }

      for (unsigned j = i + 1; j < width; ++j) {
        // Stop when we reach the required width.
        if (rowWidth == width)
          break;

        // Otherwise pad with zeros or partial product bits.
        ++rowWidth;
        // The number of results indicates the number of non-zero bits in the
        // input.
        if (j >= op.getNumResults()) {
          row.push_back(zeroFalse);
          continue;
        }

        auto ppBit =
            rewriter.createOrFold<comb::AndOp>(loc, aBits[i], aBits[j]);
        row.push_back(ppBit);
      }
      // Rows are assembled LSB-first, but concat expects MSB-first operands.
      std::reverse(row.begin(), row.end());
      auto ppRow = comb::ConcatOp::create(rewriter, loc, row);
      partialProducts.push_back(ppRow);
    }

    rewriter.replaceOp(op, partialProducts);
    return success();
  }
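
  // Why the triangular reduction is exact (sketch):
  //   a^2 = sum_i a[i]*2^(2*i) + sum_{i<j} a[i]*a[j]*2^(i+j+1)
  // since a[i]*a[i] == a[i] on the diagonal, and each symmetric pair
  // a[i]a[j] + a[j]a[i] folds into a single bit one position higher -
  // exactly the rows built above.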

  static LogicalResult lowerBoothArray(PatternRewriter &rewriter, Value a,
                                       Value b, PartialProductOp op,
                                       unsigned width) {
    Location loc = op.getLoc();
    auto zeroFalse = hw::ConstantOp::create(rewriter, loc, APInt(1, 0));
    auto zeroWidth = hw::ConstantOp::create(rewriter, loc, APInt(width, 0));

    // Detect leading zeros in the multiplicand due to zero-extension and
    // truncate to reduce the number of partial product bits:
    //   {'0, a} * {'0, b}
    auto rowWidth = width;
    auto knownBitsA = comb::computeKnownBits(a);
    if (!knownBitsA.Zero.isZero()) {
      if (knownBitsA.Zero.countLeadingOnes() > 1) {
        // Retain one leading zero to represent 2*{1'b0, a} = {a, 1'b0}:
        //   {'0, a} -> {1'b0, a}
        rowWidth -= knownBitsA.Zero.countLeadingOnes() - 1;
        a = rewriter.createOrFold<comb::ExtractOp>(loc, a, 0, rowWidth);
      }
    }

    // TODO: Replace with a concatenation to aid longest path analysis.
    auto oneRowWidth =
        hw::ConstantOp::create(rewriter, loc, APInt(rowWidth, 1));
    // Booth encoding will select each row from {-2a, -1a, 0, 1a, 2a}.
    Value twoA = rewriter.createOrFold<comb::ShlOp>(loc, a, oneRowWidth);

    // Encode based on the bits of b.
    // TODO: Sort a and b based on non-zero bits to encode the smaller input.
    SmallVector<Value> bBits = extractBits(rewriter, b);

    // Identify zero bits of b to reduce the height of the partial product
    // array.
    auto knownBitsB = comb::computeKnownBits(b);
    if (!knownBitsB.Zero.isZero()) {
      for (unsigned i = 0; i < width; ++i)
        if (knownBitsB.Zero[i])
          bBits[i] = zeroFalse;
    }

    SmallVector<Value> partialProducts;
    partialProducts.reserve(width);

    // Radix-4 Booth encoding halves the array height by grouping three bits
    // at a time:
    //   partialProducts[i] = a * (-2*b[2*i+1] + b[2*i] + b[2*i-1]) << 2*i
    //   encNeg \approx (-2*b[2*i+1] + b[2*i] + b[2*i-1]) <= 0
    //   encOne = (-2*b[2*i+1] + b[2*i] + b[2*i-1]) == +/- 1
    //   encTwo = (-2*b[2*i+1] + b[2*i] + b[2*i-1]) == +/- 2
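    // For reference, the resulting radix-4 digit per (b[2i+1], b[2i], b[2i-1])
    // triple:
    //   000 -> +0, 001 -> +1, 010 -> +1, 011 -> +2,
    //   100 -> -2, 101 -> -1, 110 -> -1, 111 -> -0
    // The 111 case is "-0": the conditional inversion below turns the row
    // into ~0 (all ones), and the +1 sign correction inserted into the next
    // row restores the value 0.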
    Value encNegPrev;

    // For even widths an additional row contains the final sign correction.
    for (unsigned i = 0; i <= width; i += 2) {
      // Get the Booth bits: b[i+1], b[i], b[i-1] (with b[-1] = 0).
      Value bim1 = (i == 0) ? zeroFalse : bBits[i - 1];
      Value bi = (i < width) ? bBits[i] : zeroFalse;
      Value bip1 = (i + 1 < width) ? bBits[i + 1] : zeroFalse;

      // Is the encoding zero or negative (an approximation).
      Value encNeg = bip1;
      // Is the encoding one: b[i] xor b[i-1].
      Value encOne = rewriter.createOrFold<comb::XorOp>(loc, bi, bim1, true);
      // Is the encoding two: (bip1 & ~bi & ~bim1) | (~bip1 & bi & bim1).
      Value constOne = hw::ConstantOp::create(rewriter, loc, APInt(1, 1));
      Value biInv = rewriter.createOrFold<comb::XorOp>(loc, bi, constOne, true);
      Value bip1Inv =
          rewriter.createOrFold<comb::XorOp>(loc, bip1, constOne, true);
      Value bim1Inv =
          rewriter.createOrFold<comb::XorOp>(loc, bim1, constOne, true);

      Value andLeft = rewriter.createOrFold<comb::AndOp>(
          loc, ValueRange{bip1Inv, bi, bim1}, true);
      Value andRight = rewriter.createOrFold<comb::AndOp>(
          loc, ValueRange{bip1, biInv, bim1Inv}, true);
      Value encTwo =
          rewriter.createOrFold<comb::OrOp>(loc, andLeft, andRight, true);

      Value encNegRepl =
          rewriter.createOrFold<comb::ReplicateOp>(loc, encNeg, rowWidth);
      Value encOneRepl =
          rewriter.createOrFold<comb::ReplicateOp>(loc, encOne, rowWidth);
      Value encTwoRepl =
          rewriter.createOrFold<comb::ReplicateOp>(loc, encTwo, rowWidth);

      // Select 2*a, 1*a or 0*a.
      Value selTwoA = rewriter.createOrFold<comb::AndOp>(loc, encTwoRepl, twoA);
      Value selOneA = rewriter.createOrFold<comb::AndOp>(loc, encOneRepl, a);
      Value magA =
          rewriter.createOrFold<comb::OrOp>(loc, selTwoA, selOneA, true);

      // Conditionally invert the row.
      Value ppRow =
          rewriter.createOrFold<comb::XorOp>(loc, magA, encNegRepl, true);

      // Sign-extension optimisation (Section 7.2.2 of "Application Specific
      // Arithmetic" by Dinechin & Kumm): handle sign-extension and padding to
      // the full width, with s = encNeg (the sign bit):
      //   {s, s, s, s, s, pp} = {1, 1, 1, 1, 1, pp}
      //                       + {0, 0, 0, 0,!s, '0}
      // Applying this to every row creates an upper triangle of ones that can
      // be optimised away, since it will not affect the final sum:
      //   {!s3,  0, !s2,  0, !s1,  0}
      //   {  1,  1,   1,  1,   1, p1}
      //   {  1,  1,   1, p2}
      //   {  1, p3}
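      // Minimal check of the identity with three replicated sign bits:
      //   {s,s,s} == {1,1,1} + {0,0,!s} (mod 8):
      //   s=1: 111 + 000 = 111;  s=0: 111 + 001 = 000 (mod 8).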
      if (rowWidth < width) {
        auto padding = width - rowWidth;
        auto encNegInv = bip1Inv;

        // The sign-extension trick is not worth it for padding < 3.
        if (padding < 3) {
          Value encNegPad =
              rewriter.createOrFold<comb::ReplicateOp>(loc, encNeg, padding);
          ppRow = rewriter.createOrFold<comb::ConcatOp>(
              loc, ValueRange{encNegPad, ppRow}); // Pad to full width.
        } else if (i == 0) {
          // First row = {!encNeg, encNeg, encNeg, ppRow}
          ppRow = rewriter.createOrFold<comb::ConcatOp>(
              loc, ValueRange{encNegInv, encNeg, encNeg, ppRow});
        } else {
          // Remaining rows = {1, !encNeg, ppRow}
          ppRow = rewriter.createOrFold<comb::ConcatOp>(
              loc, ValueRange{constOne, encNegInv, ppRow});
        }

        // Zero-pad to the full width.
        auto rowWidth = ppRow.getType().getIntOrFloatBitWidth();
        if (rowWidth < width) {
          auto zeroPad =
              hw::ConstantOp::create(rewriter, loc, APInt(width - rowWidth, 0));
          ppRow = rewriter.createOrFold<comb::ConcatOp>(
              loc, ValueRange{zeroPad, ppRow});
        }
      }

      // No sign-correction in the first row.
      if (i == 0) {
        partialProducts.push_back(ppRow);
        encNegPrev = encNeg;
        continue;
      }

      // Insert the sign-correction for the previous row: a negatively encoded
      // row was only bitwise-inverted, so the +1 completing its two's
      // complement negation is added here, at the previous row's alignment.
      assert(i >= 2 && "Expected i to be at least 2 for sign correction");
      // {ppRow, 0, encNegPrev} << (i - 2)
      Value withSignCorrection = rewriter.createOrFold<comb::ConcatOp>(
          loc, ValueRange{ppRow, zeroFalse, encNegPrev});
      Value ppAlignPre = rewriter.createOrFold<comb::ExtractOp>(
          loc, withSignCorrection, 0, width);
      Value shiftBy =
          hw::ConstantOp::create(rewriter, loc, APInt(width, i - 2));
      Value ppAlign =
          rewriter.createOrFold<comb::ShlOp>(loc, ppAlignPre, shiftBy);
      partialProducts.push_back(ppAlign);
      encNegPrev = encNeg;

      if (partialProducts.size() == op.getNumResults())
        break;
    }

    // Pad with zero rows to match the required number of results.
    while (partialProducts.size() < op.getNumResults())
      partialProducts.push_back(zeroWidth);

    assert(partialProducts.size() == op.getNumResults() &&
           "Expected number of Booth partial products to match results");

    rewriter.replaceOp(op, partialProducts);
    return success();
  }
};
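
// Worked radix-4 example (illustrative): for b = 6 = 0b0110 the Booth digits
// are
//   i=0: (b1, b0, b-1) = (1, 0, 0) -> -2
//   i=2: (b3, b2, b1)  = (0, 1, 1) -> +2
// so a*6 = (2a << 2) - (2a << 0) = 8a - 2a, i.e. two rows where the plain
// AND array would need four.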

struct DatapathPosPartialProductOpConversion
    : OpRewritePattern<PosPartialProductOp> {
  using OpRewritePattern<PosPartialProductOp>::OpRewritePattern;

  DatapathPosPartialProductOpConversion(MLIRContext *context, bool forceBooth)
      : OpRewritePattern<PosPartialProductOp>(context),
        forceBooth(forceBooth) {}

  const bool forceBooth;

  LogicalResult matchAndRewrite(PosPartialProductOp op,
                                PatternRewriter &rewriter) const override {

    Value a = op.getAddend0();
    Value b = op.getAddend1();
    Value c = op.getMultiplicand();
    unsigned width = a.getType().getIntOrFloatBitWidth();

    // Skip zero-width values.
    if (width == 0) {
      rewriter.replaceOpWithNewOp<hw::ConstantOp>(op, op.getType(0), 0);
      return success();
    }

    // TODO: Implement a Booth lowering.
    return lowerAndArray(rewriter, a, b, c, op, width);
  }

private:
  static LogicalResult lowerAndArray(PatternRewriter &rewriter, Value a,
                                     Value b, Value c, PosPartialProductOp op,
                                     unsigned width) {

    Location loc = op.getLoc();
    // Encode (a+b) with a half-adder per bit position; by construction the
    // outputs are disjoint: carry[i] & save[i] == false.
    auto carry = rewriter.createOrFold<comb::AndOp>(loc, a, b);
    auto save = rewriter.createOrFold<comb::XorOp>(loc, a, b);

    SmallVector<Value> carryBits = extractBits(rewriter, carry);
    SmallVector<Value> saveBits = extractBits(rewriter, save);

    // Reduce the width of c based on its leading zeros.
    auto rowWidth = width;
    auto knownBitsC = comb::computeKnownBits(c);
    if (!knownBitsC.Zero.isZero()) {
      if (knownBitsC.Zero.countLeadingOnes() > 1) {
        // Retain one leading zero to represent 2*{1'b0, c} = {c, 1'b0}:
        //   {'0, c} -> {1'b0, c}
        rowWidth -= knownBitsC.Zero.countLeadingOnes() - 1;
        c = rewriter.createOrFold<comb::ExtractOp>(loc, c, 0, rowWidth);
      }
    }

    // Compute 2*c for use in the array construction.
    Value zero = hw::ConstantOp::create(rewriter, loc, APInt(1, 0));
    Value twoCWider = rewriter.create<comb::ConcatOp>(loc, ValueRange{c, zero});
    Value twoC = rewriter.create<comb::ExtractOp>(loc, twoCWider, 0, rowWidth);

    // AND array construction:
    //   pp[i] = ( (carry[i] * (c<<1)) | (save[i] * c) ) << i
    SmallVector<Value> partialProducts;
    partialProducts.reserve(width);

    assert(op.getNumResults() <= width &&
           "Cannot return more results than the operator width");

    for (unsigned i = 0; i < op.getNumResults(); ++i) {
      auto replSave =
          rewriter.createOrFold<comb::ReplicateOp>(loc, saveBits[i], rowWidth);
      auto replCarry =
          rewriter.createOrFold<comb::ReplicateOp>(loc, carryBits[i], rowWidth);

      auto ppRowSave = rewriter.createOrFold<comb::AndOp>(loc, replSave, c);
      auto ppRowCarry =
          rewriter.createOrFold<comb::AndOp>(loc, replCarry, twoC);
      // carry[i] and save[i] are disjoint, so an OR suffices here.
      auto ppRow =
          rewriter.createOrFold<comb::OrOp>(loc, ppRowSave, ppRowCarry);
      auto ppAlign = ppRow;
      if (i > 0) {
        auto shiftBy = hw::ConstantOp::create(rewriter, loc, APInt(i, 0));
        ppAlign =
            comb::ConcatOp::create(rewriter, loc, ValueRange{ppRow, shiftBy});
      }

      // May need to truncate the shifted value.
      if (rowWidth + i > width) {
        auto ppAlignTrunc =
            rewriter.createOrFold<comb::ExtractOp>(loc, ppAlign, 0, width);
        partialProducts.push_back(ppAlignTrunc);
        continue;
      }
      // May need to zero-pad to the appropriate width.
      if (rowWidth + i < width) {
        auto padding = width - rowWidth - i;
        Value zeroPad =
            hw::ConstantOp::create(rewriter, loc, APInt(padding, 0));
        partialProducts.push_back(rewriter.createOrFold<comb::ConcatOp>(
            loc, ValueRange{zeroPad, ppAlign})); // Pad to full width.
        continue;
      }

      partialProducts.push_back(ppAlign);
    }

    rewriter.replaceOp(op, partialProducts);
    return success();
  }
};
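
// Why the half-adder encoding above works (sketch): at each bit position
//   a[i] + b[i] == 2*carry[i] + save[i],  carry[i] = a[i] & b[i],
//                                         save[i]  = a[i] ^ b[i],
// so (a + b) * c == sum_i (carry[i]*(2*c) + save[i]*c) << i. Because
// carry[i] & save[i] == 0, at most one term per row is non-zero, which is
// what lets the two selections merge with an OR rather than an adder.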

} // namespace

//===----------------------------------------------------------------------===//
// Convert Datapath to Comb pass
//===----------------------------------------------------------------------===//

namespace {
struct ConvertDatapathToCombPass
    : public impl::ConvertDatapathToCombBase<ConvertDatapathToCombPass> {
  void runOnOperation() override;
  using ConvertDatapathToCombBase<
      ConvertDatapathToCombPass>::ConvertDatapathToCombBase;
};
} // namespace

static LogicalResult applyPatternsGreedilyWithTimingInfo(
    Operation *op, RewritePatternSet &&patterns,
    synth::IncrementalLongestPathAnalysis *analysis) {
  // TODO: Topologically sort the operations in the module to ensure that all
  // dependencies are processed before their users.
  mlir::GreedyRewriteConfig config;
  // Set the listener to update timing information.
  // HACK: Set max iterations to 2 so that the patterns act as one-shot
  // rewrites, ensuring the targeted datapath operations are replaced in a
  // single sweep.
  config.setMaxIterations(2).setListener(analysis).setUseTopDownTraversal(true);

  // Apply the patterns greedily.
  if (failed(mlir::applyPatternsGreedily(op, std::move(patterns), config)))
    return failure();

  return success();
}

void ConvertDatapathToCombPass::runOnOperation() {
  RewritePatternSet patterns(&getContext());

  patterns.add<DatapathPartialProductOpConversion,
               DatapathPosPartialProductOpConversion>(patterns.getContext(),
                                                      forceBooth);
  synth::IncrementalLongestPathAnalysis *analysis = nullptr;
  if (timingAware)
    analysis = &getAnalysis<synth::IncrementalLongestPathAnalysis>();
  if (lowerCompressToAdd)
    // Lower compressors to simple add operations for downstream optimisations.
    patterns.add<DatapathCompressOpAddConversion>(patterns.getContext());
  else
    // Lower compressors to a complete gate-level implementation.
    patterns.add<DatapathCompressOpConversion>(patterns.getContext(), analysis);

  if (failed(applyPatternsGreedilyWithTimingInfo(
          getOperation(), std::move(patterns), analysis)))
    return signalPassFailure();

  // Verify that all Datapath operations have been successfully converted:
  // walk the operation and check for any remaining Datapath dialect
  // operations.
  auto result = getOperation()->walk([&](Operation *op) {
    if (llvm::isa_and_nonnull<datapath::DatapathDialect>(op->getDialect())) {
      op->emitError("Datapath operation not converted: ") << *op;
      return WalkResult::interrupt();
    }
    return WalkResult::advance();
  });
  if (result.wasInterrupted())
    return signalPassFailure();
}
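
// Example invocation (illustrative; the option spellings are defined by the
// pass tablegen and may differ):
//   circt-opt --convert-datapath-to-comb input.mlir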