1 //=== LoopScheduleToCalyx.cpp - LoopSchedule to Calyx pass entry point-----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the main LoopSchedule to Calyx conversion pass implementation.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "circt/Conversion/LoopScheduleToCalyx.h"
14 #include "circt/Dialect/Calyx/CalyxHelpers.h"
15 #include "circt/Dialect/Calyx/CalyxLoweringUtils.h"
16 #include "circt/Dialect/Calyx/CalyxOps.h"
18 #include "circt/Dialect/HW/HWOps.h"
19 #include "circt/Dialect/LoopSchedule/LoopScheduleOps.h"
20 #include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
21 #include "mlir/Conversion/LLVMCommon/Pattern.h"
22 #include "mlir/Dialect/Arith/IR/Arith.h"
23 #include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h"
24 #include "mlir/Dialect/Func/IR/FuncOps.h"
25 #include "mlir/Dialect/MemRef/IR/MemRef.h"
26 #include "mlir/IR/AsmState.h"
27 #include "mlir/IR/Matchers.h"
28 #include "mlir/Pass/Pass.h"
29 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
30 #include "llvm/ADT/TypeSwitch.h"
31 
32 #include <variant>
33 
34 namespace circt {
35 #define GEN_PASS_DEF_LOOPSCHEDULETOCALYX
36 #include "circt/Conversion/Passes.h.inc"
37 } // namespace circt
38 
39 using namespace llvm;
40 using namespace mlir;
41 using namespace mlir::arith;
42 using namespace mlir::cf;
43 using namespace mlir::func;
44 using namespace circt::loopschedule;
45 
46 namespace circt {
47 namespace pipelinetocalyx {
48 
49 //===----------------------------------------------------------------------===//
50 // Utility types
51 //===----------------------------------------------------------------------===//
52 
53 class PipelineWhileOp : public calyx::WhileOpInterface<LoopSchedulePipelineOp> {
54 public:
55  explicit PipelineWhileOp(LoopSchedulePipelineOp op)
56  : calyx::WhileOpInterface<LoopSchedulePipelineOp>(op) {}
57 
58  Block::BlockArgListType getBodyArgs() override {
59  return getOperation().getStagesBlock().getArguments();
60  }
61 
62  Block *getBodyBlock() override { return &getOperation().getStagesBlock(); }
63 
64  Block *getConditionBlock() override { return &getOperation().getCondBlock(); }
65 
66  Value getConditionValue() override {
67  return getOperation().getCondBlock().getTerminator()->getOperand(0);
68  }
69 
70  std::optional<int64_t> getBound() override {
71  return getOperation().getTripCount();
72  }
73 };
74 
75 //===----------------------------------------------------------------------===//
76 // Lowering state classes
77 //===----------------------------------------------------------------------===//
78 
79 struct PipelineScheduleable {
80  /// While operation to schedule.
81  PipelineWhileOp whileOp;
82  /// The group(s) to schedule before the while operation. These groups should
83  /// set the initial value(s) of the loop init_args register(s).
84  SmallVector<calyx::GroupOp> initGroups;
85 };
86 
87 /// A variant of types representing scheduleable operations.
88 using Scheduleable = std::variant<calyx::GroupOp, PipelineScheduleable>;
89 
90 /// Holds additional information required for scheduling LoopSchedule pipelines.
91 class PipelineScheduler : public calyx::SchedulerInterface<Scheduleable> {
92 public:
93  /// Registers operations that may be used in a pipeline, but do not produce
94  /// a value to be used in a further stage.
95  void registerNonPipelineOperations(Operation *op,
96  calyx::GroupInterface group) {
97  operationToGroup[op] = group;
98  }
99 
100  /// Returns the group registered for this non-pipelined operation, or
101  /// std::nullopt otherwise.
102  template <typename TGroupOp = calyx::GroupInterface>
103  std::optional<TGroupOp> getNonPipelinedGroupFrom(Operation *op) {
104  auto it = operationToGroup.find(op);
105  if (it == operationToGroup.end())
106  return std::nullopt;
107 
108  if constexpr (std::is_same<TGroupOp, calyx::GroupInterface>::value)
109  return it->second;
110  else {
111  auto group = dyn_cast<TGroupOp>(it->second.getOperation());
112  assert(group && "Actual group type differed from expected group type");
113  return group;
114  }
115  }
116  /// Register reg as being the idx'th pipeline register for the stage.
117  void addPipelineReg(Operation *stage, calyx::RegisterOp reg, unsigned idx) {
118  assert(pipelineRegs[stage].count(idx) == 0);
119  assert(idx < stage->getNumResults());
120  pipelineRegs[stage][idx] = reg;
121  }
122 
123  /// Return a mapping of stage result indices to pipeline registers.
124  const DenseMap<unsigned, calyx::RegisterOp> &
125  getPipelineRegs(Operation *stage) {
126  return pipelineRegs[stage];
127  }
128 
129  /// Add a stage's groups to the pipeline prologue.
130  void addPipelinePrologue(Operation *op, SmallVector<StringAttr> groupNames) {
131  pipelinePrologue[op].push_back(groupNames);
132  }
133 
134  /// Add a stage's groups to the pipeline epilogue.
135  void addPipelineEpilogue(Operation *op, SmallVector<StringAttr> groupNames) {
136  pipelineEpilogue[op].push_back(groupNames);
137  }
138 
139  /// Get the pipeline prologue.
140  SmallVector<SmallVector<StringAttr>> getPipelinePrologue(Operation *op) {
141  return pipelinePrologue[op];
142  }
143 
144  /// Create the pipeline prologue.
145  void createPipelinePrologue(Operation *op, PatternRewriter &rewriter) {
146  auto stages = pipelinePrologue[op];
147  for (size_t i = 0, e = stages.size(); i < e; ++i) {
148  PatternRewriter::InsertionGuard g(rewriter);
149  auto parOp = rewriter.create<calyx::ParOp>(op->getLoc());
150  rewriter.setInsertionPointToStart(parOp.getBodyBlock());
151  for (size_t j = 0; j < i + 1; ++j)
152  for (auto group : stages[j])
153  rewriter.create<calyx::EnableOp>(op->getLoc(), group);
154  }
155  }
156 
157  /// Create the pipeline epilogue.
158  void createPipelineEpilogue(Operation *op, PatternRewriter &rewriter) {
159  auto stages = pipelineEpilogue[op];
160  for (size_t i = 0, e = stages.size(); i < e; ++i) {
161  PatternRewriter::InsertionGuard g(rewriter);
162  auto parOp = rewriter.create<calyx::ParOp>(op->getLoc());
163  rewriter.setInsertionPointToStart(parOp.getBodyBlock());
164  for (size_t j = i, f = stages.size(); j < f; ++j)
165  for (auto group : stages[j])
166  rewriter.create<calyx::EnableOp>(op->getLoc(), group);
167  }
168  }
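  // To make the prologue/epilogue mechanism concrete, consider a hypothetical
  // three-stage pipeline whose per-stage groups are S0, S1 and S2 (names made
  // up). The prologue then holds [[S0], [S1]] and the epilogue [[S1], [S2]],
  // and the two methods above emit control along these lines (abbreviated,
  // not verbatim compiler output):
  //
  //   calyx.par { calyx.enable @S0 }                      // fill: stage 0
  //   calyx.par { calyx.enable @S0  calyx.enable @S1 }    // fill: stages 0-1
  //   ... the while op then runs all three stages each iteration ...
  //   calyx.par { calyx.enable @S1  calyx.enable @S2 }    // drain: stages 1-2
  //   calyx.par { calyx.enable @S2 }                      // drain: stage 2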
169 
170 private:
171  /// A mapping between operations and the groups to which they were assigned.
172  /// This is used for specific corner cases, such as pipeline stages that may
173  /// not actually pipeline any values.
174  DenseMap<Operation *, calyx::GroupInterface> operationToGroup;
175 
176  /// A mapping from pipeline stages to their registers.
177  DenseMap<Operation *, DenseMap<unsigned, calyx::RegisterOp>> pipelineRegs;
178 
179  /// A mapping from pipeline ops to a vector of vectors of group names that
180  /// constitute the pipeline prologue. Each inner vector consists of the groups
181  /// for one stage.
182  DenseMap<Operation *, SmallVector<SmallVector<StringAttr>>> pipelinePrologue;
183 
184  /// A mapping from pipeline ops to a vector of vectors of group names that
185  /// constitute the pipeline epilogue. Each inner vector consists of the groups
186  /// for one stage.
187  DenseMap<Operation *, SmallVector<SmallVector<StringAttr>>> pipelineEpilogue;
188 };
189 
190 /// Handles the current state of lowering of a Calyx component. It is mainly
191 /// used as a key/value store for recording information during partial lowering,
192 /// which is required at later lowering passes.
193 class ComponentLoweringState
194  : public calyx::ComponentLoweringStateInterface,
195  public calyx::LoopLoweringStateInterface<PipelineWhileOp>,
196  public PipelineScheduler {
197 public:
198  ComponentLoweringState(calyx::ComponentOp component)
199  : calyx::ComponentLoweringStateInterface(component) {}
200 };
201 
202 //===----------------------------------------------------------------------===//
203 // Conversion patterns
204 //===----------------------------------------------------------------------===//
205 
206 /// Iterate through the operations of a source function and instantiate
207 /// components or primitives based on the type of the operations.
208 struct BuildOpGroups : calyx::FuncOpPartialLoweringPattern {
209  using FuncOpPartialLoweringPattern::FuncOpPartialLoweringPattern;
210 
211  LogicalResult
212  partiallyLowerFuncToComp(FuncOp funcOp,
213  PatternRewriter &rewriter) const override {
214  /// We walk the operations of the funcOp to ensure that all defs have
215  /// been visited before their uses.
216  bool opBuiltSuccessfully = true;
217  funcOp.walk([&](Operation *_op) {
218  opBuiltSuccessfully &=
219  TypeSwitch<mlir::Operation *, bool>(_op)
220  .template Case<arith::ConstantOp, ReturnOp, BranchOpInterface,
221  /// memref
222  memref::AllocOp, memref::AllocaOp, memref::LoadOp,
223  memref::StoreOp,
224  /// standard arithmetic
225  AddIOp, SubIOp, CmpIOp, ShLIOp, ShRUIOp, ShRSIOp,
226  AndIOp, XOrIOp, OrIOp, ExtUIOp, TruncIOp, MulIOp,
227  DivUIOp, RemUIOp, IndexCastOp,
228  /// static logic
229  LoopScheduleTerminatorOp>(
230  [&](auto op) { return buildOp(rewriter, op).succeeded(); })
231  .template Case<FuncOp, LoopSchedulePipelineOp,
232  LoopScheduleRegisterOp,
233  LoopSchedulePipelineStageOp>([&](auto) {
234  /// Skip: these special cases will be handled separately.
235  return true;
236  })
237  .Default([&](auto op) {
238  op->emitError() << "Unhandled operation during BuildOpGroups()";
239  return false;
240  });
241 
242  return opBuiltSuccessfully ? WalkResult::advance()
243  : WalkResult::interrupt();
244  });
245 
246  return success(opBuiltSuccessfully);
247  }
248 
249 private:
250  /// Op builder specializations.
251  LogicalResult buildOp(PatternRewriter &rewriter,
252  BranchOpInterface brOp) const;
253  LogicalResult buildOp(PatternRewriter &rewriter,
254  arith::ConstantOp constOp) const;
255  LogicalResult buildOp(PatternRewriter &rewriter, AddIOp op) const;
256  LogicalResult buildOp(PatternRewriter &rewriter, SubIOp op) const;
257  LogicalResult buildOp(PatternRewriter &rewriter, MulIOp op) const;
258  LogicalResult buildOp(PatternRewriter &rewriter, DivUIOp op) const;
259  LogicalResult buildOp(PatternRewriter &rewriter, RemUIOp op) const;
260  LogicalResult buildOp(PatternRewriter &rewriter, ShRUIOp op) const;
261  LogicalResult buildOp(PatternRewriter &rewriter, ShRSIOp op) const;
262  LogicalResult buildOp(PatternRewriter &rewriter, ShLIOp op) const;
263  LogicalResult buildOp(PatternRewriter &rewriter, AndIOp op) const;
264  LogicalResult buildOp(PatternRewriter &rewriter, OrIOp op) const;
265  LogicalResult buildOp(PatternRewriter &rewriter, XOrIOp op) const;
266  LogicalResult buildOp(PatternRewriter &rewriter, CmpIOp op) const;
267  LogicalResult buildOp(PatternRewriter &rewriter, TruncIOp op) const;
268  LogicalResult buildOp(PatternRewriter &rewriter, ExtUIOp op) const;
269  LogicalResult buildOp(PatternRewriter &rewriter, ReturnOp op) const;
270  LogicalResult buildOp(PatternRewriter &rewriter, IndexCastOp op) const;
271  LogicalResult buildOp(PatternRewriter &rewriter, memref::AllocOp op) const;
272  LogicalResult buildOp(PatternRewriter &rewriter, memref::AllocaOp op) const;
273  LogicalResult buildOp(PatternRewriter &rewriter, memref::LoadOp op) const;
274  LogicalResult buildOp(PatternRewriter &rewriter, memref::StoreOp op) const;
275  LogicalResult buildOp(PatternRewriter &rewriter,
276  LoopScheduleTerminatorOp op) const;
277 
278  /// buildLibraryOp will build a TCalyxLibOp inside a TGroupOp based on the
279  /// source operation TSrcOp.
280  template <typename TGroupOp, typename TCalyxLibOp, typename TSrcOp>
281  LogicalResult buildLibraryOp(PatternRewriter &rewriter, TSrcOp op,
282  TypeRange srcTypes, TypeRange dstTypes) const {
283  SmallVector<Type> types;
284  llvm::append_range(types, srcTypes);
285  llvm::append_range(types, dstTypes);
286 
287  auto calyxOp =
288  getState<ComponentLoweringState>().getNewLibraryOpInstance<TCalyxLibOp>(
289  rewriter, op.getLoc(), types);
290 
291  auto directions = calyxOp.portDirections();
292  SmallVector<Value, 4> opInputPorts;
293  SmallVector<Value, 4> opOutputPorts;
294  for (auto dir : enumerate(directions)) {
295  if (dir.value() == calyx::Direction::Input)
296  opInputPorts.push_back(calyxOp.getResult(dir.index()));
297  else
298  opOutputPorts.push_back(calyxOp.getResult(dir.index()));
299  }
300  assert(
301  opInputPorts.size() == op->getNumOperands() &&
302  opOutputPorts.size() == op->getNumResults() &&
303  "Expected an equal number of in/out ports in the Calyx library op with "
304  "respect to the number of operands/results of the source operation.");
305 
306  /// Create assignments to the inputs of the library op.
307  auto group = createGroupForOp<TGroupOp>(rewriter, op);
308  rewriter.setInsertionPointToEnd(group.getBodyBlock());
309  for (auto dstOp : enumerate(opInputPorts))
310  rewriter.create<calyx::AssignOp>(op.getLoc(), dstOp.value(),
311  op->getOperand(dstOp.index()));
312 
313  /// Replace the result values of the source operator with the new operator.
314  for (auto res : enumerate(opOutputPorts)) {
315  getState<ComponentLoweringState>().registerEvaluatingGroup(res.value(),
316  group);
317  op->getResult(res.index()).replaceAllUsesWith(res.value());
318  }
319  return success();
320  }
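  // For instance, lowering an `arith.addi %a, %b : i32` through this helper
  // with TGroupOp = calyx::CombGroupOp and TCalyxLibOp = calyx::AddLibOp
  // produces something along these lines (a sketch; instance and group names
  // are made up):
  //
  //   %std_add_0.left, %std_add_0.right, %std_add_0.out =
  //       calyx.std_add @std_add_0 : i32, i32, i32
  //   calyx.comb_group @bb0_0 {
  //     calyx.assign %std_add_0.left = %a : i32
  //     calyx.assign %std_add_0.right = %b : i32
  //   }
  //
  // and all uses of the addi result are rewired to %std_add_0.out.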
321 
322  /// buildLibraryOp which provides in- and output types based on the operands
323  /// and results of the op argument.
324  template <typename TGroupOp, typename TCalyxLibOp, typename TSrcOp>
325  LogicalResult buildLibraryOp(PatternRewriter &rewriter, TSrcOp op) const {
326  return buildLibraryOp<TGroupOp, TCalyxLibOp, TSrcOp>(
327  rewriter, op, op.getOperandTypes(), op->getResultTypes());
328  }
329 
330  /// Creates a group named by the basic block which the input op resides in.
331  template <typename TGroupOp>
332  TGroupOp createGroupForOp(PatternRewriter &rewriter, Operation *op) const {
333  Block *block = op->getBlock();
334  auto groupName = getState<ComponentLoweringState>().getUniqueName(
335  loweringState().blockName(block));
336  return calyx::createGroup<TGroupOp>(
337  rewriter, getState<ComponentLoweringState>().getComponentOp(),
338  op->getLoc(), groupName);
339  }
340 
341  /// buildLibraryBinaryPipeOp will build a TCalyxLibBinaryPipeOp, to
342  /// deal with MulIOp, DivUIOp and RemUIOp.
343  template <typename TOpType, typename TSrcOp>
344  LogicalResult buildLibraryBinaryPipeOp(PatternRewriter &rewriter, TSrcOp op,
345  TOpType opPipe, Value out) const {
346  StringRef opName = TSrcOp::getOperationName().split(".").second;
347  Location loc = op.getLoc();
348  Type width = op.getResult().getType();
349  // Pass the result from the Operation to the Calyx primitive.
350  op.getResult().replaceAllUsesWith(out);
351  auto reg = createRegister(
352  op.getLoc(), rewriter, getComponent(), width.getIntOrFloatBitWidth(),
353  getState<ComponentLoweringState>().getUniqueName(opName));
354  // Operation pipelines are not combinational, so a GroupOp is required.
355  auto group = createGroupForOp<calyx::GroupOp>(rewriter, op);
356  getState<ComponentLoweringState>().addBlockScheduleable(op->getBlock(),
357  group);
358 
359  rewriter.setInsertionPointToEnd(group.getBodyBlock());
360  rewriter.create<calyx::AssignOp>(loc, opPipe.getLeft(), op.getLhs());
361  rewriter.create<calyx::AssignOp>(loc, opPipe.getRight(), op.getRhs());
362  // Write the output to this register.
363  rewriter.create<calyx::AssignOp>(loc, reg.getIn(), out);
364  // The write enable port is high when the pipeline is done.
365  rewriter.create<calyx::AssignOp>(loc, reg.getWriteEn(), opPipe.getDone());
366  rewriter.create<calyx::AssignOp>(
367  loc, opPipe.getGo(),
368  createConstant(loc, rewriter, getComponent(), 1, 1));
369  // The group is done when the register write is complete.
370  rewriter.create<calyx::GroupDoneOp>(loc, reg.getDone());
371 
372  // Register the values for the pipeline.
373  getState<ComponentLoweringState>().registerEvaluatingGroup(out, group);
374  getState<ComponentLoweringState>().registerEvaluatingGroup(opPipe.getLeft(),
375  group);
376  getState<ComponentLoweringState>().registerEvaluatingGroup(
377  opPipe.getRight(), group);
378 
379  return success();
380  }
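  // Rough sketch of the result for a multiply (cell and group names are
  // illustrative): the source `arith.muli` maps onto a multiply pipe cell plus
  // a result register, wrapped in a sequential group shaped roughly like:
  //
  //   calyx.group @bb0_1 {
  //     calyx.assign %mult_pipe.left = %lhs
  //     calyx.assign %mult_pipe.right = %rhs
  //     calyx.assign %muli_reg.in = %mult_pipe.out
  //     calyx.assign %muli_reg.write_en = %mult_pipe.done
  //     calyx.assign %mult_pipe.go = %c1_i1
  //     calyx.group_done %muli_reg.done
  //   }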
381 
382  /// Creates assignments within the provided group to the address ports of the
383  /// memoryOp based on the provided addressValues.
384  void assignAddressPorts(PatternRewriter &rewriter, Location loc,
385  calyx::GroupInterface group,
386  calyx::MemoryInterface memoryInterface,
387  Operation::operand_range addressValues) const {
388  IRRewriter::InsertionGuard guard(rewriter);
389  rewriter.setInsertionPointToEnd(group.getBody());
390  auto addrPorts = memoryInterface.addrPorts();
391  if (addressValues.empty()) {
392  assert(
393  addrPorts.size() == 1 &&
394  "We expected a 1 dimensional memory of size 1 because there were no "
395  "address assignment values");
396  // Assign 1'd0 to the address port.
397  rewriter.create<calyx::AssignOp>(
398  loc, addrPorts[0],
399  createConstant(loc, rewriter, getComponent(), 1, 0));
400  } else {
401  assert(addrPorts.size() == addressValues.size() &&
402  "Mismatch between number of address ports of the provided memory "
403  "and address assignment values");
404  for (auto address : enumerate(addressValues))
405  rewriter.create<calyx::AssignOp>(loc, addrPorts[address.index()],
406  address.value());
407  }
408  }
409 };
410 
411 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
412  memref::LoadOp loadOp) const {
413  Value memref = loadOp.getMemref();
414  auto memoryInterface =
415  getState<ComponentLoweringState>().getMemoryInterface(memref);
416  if (calyx::noStoresToMemory(memref) && calyx::singleLoadFromMemory(memref)) {
417  // Single load from memory; we do not need to write the
418  // output to a register. This is essentially a "combinational read" under
419  // current Calyx semantics with memory, and thus can be done in a
420  // combinational group. Note that if any stores are done to this memory,
421  // we require that the load and store be in separate non-combinational
422  // groups to avoid reading and writing to the same memory in the same group.
423  auto combGroup = createGroupForOp<calyx::CombGroupOp>(rewriter, loadOp);
424  assignAddressPorts(rewriter, loadOp.getLoc(), combGroup, memoryInterface,
425  loadOp.getIndices());
426 
427  // We refrain from replacing the loadOp result with
428  // memoryInterface.readData, since multiple loadOp's need to be converted
429  // to a single memory's ReadData. If this replacement is done now, we lose
430  // the link between which SSA memref::LoadOp values map to which groups for
431  // loading a value from the Calyx memory. At this point of lowering, we
432  // keep the memref::LoadOp SSA value, and do value replacement _after_
433  // control has been generated (see LateSSAReplacement). This is *vital* for
434  // things such as InlineCombGroups to be able to properly track which
435  // memory assignment groups belong to which accesses.
436  getState<ComponentLoweringState>().registerEvaluatingGroup(
437  loadOp.getResult(), combGroup);
438  } else {
439  auto group = createGroupForOp<calyx::GroupOp>(rewriter, loadOp);
440  assignAddressPorts(rewriter, loadOp.getLoc(), group, memoryInterface,
441  loadOp.getIndices());
442 
443  // Multiple loads from the same memory; in this case, we _may_ have a
444  // structural hazard in the design we generate. To get around this, we
445  // conservatively place a register in front of each load operation, and
446  // replace all uses of the loaded value with the register output. Proper
447  // handling of this requires the combinational group inliner/scheduler to
448  // be aware of when a combinational expression references multiple loaded
449  // values from the same memory, and then schedule assignments to temporary
450  // registers to get around the structural hazard.
451  auto reg = createRegister(
452  loadOp.getLoc(), rewriter, getComponent(),
453  loadOp.getMemRefType().getElementTypeBitWidth(),
454  getState<ComponentLoweringState>().getUniqueName("load"));
455  calyx::buildAssignmentsForRegisterWrite(
456  rewriter, group, getState<ComponentLoweringState>().getComponentOp(),
457  reg, memoryInterface.readData());
458  loadOp.getResult().replaceAllUsesWith(reg.getOut());
459  getState<ComponentLoweringState>().addBlockScheduleable(loadOp->getBlock(),
460  group);
461  }
462  return success();
463 }
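// To make the two cases above concrete (a sketch with invented names): a
// single load from %mem at index %i becomes a combinational group that only
// drives the address port,
//
//   calyx.comb_group @load_mem {
//     calyx.assign %mem.addr0 = %i
//   }
//
// and the loadOp result is only rewired to %mem.read_data later, in
// LateSSAReplacement. When the memory is also stored to (or loaded from more
// than once), the same address assignments instead go into a sequential group
// that additionally latches %mem.read_data into a freshly created "load"
// register, and uses of the loaded value are replaced with that register's
// output immediately.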
464 
465 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
466  memref::StoreOp storeOp) const {
467  auto memoryInterface = getState<ComponentLoweringState>().getMemoryInterface(
468  storeOp.getMemref());
469  auto group = createGroupForOp<calyx::GroupOp>(rewriter, storeOp);
470 
471  // This is a sequential group, so register it as being scheduleable for the
472  // block.
473  getState<ComponentLoweringState>().addBlockScheduleable(storeOp->getBlock(),
474  group);
475  assignAddressPorts(rewriter, storeOp.getLoc(), group, memoryInterface,
476  storeOp.getIndices());
477  rewriter.setInsertionPointToEnd(group.getBodyBlock());
478  rewriter.create<calyx::AssignOp>(
479  storeOp.getLoc(), memoryInterface.writeData(), storeOp.getValueToStore());
480  rewriter.create<calyx::AssignOp>(
481  storeOp.getLoc(), memoryInterface.writeEn(),
482  createConstant(storeOp.getLoc(), rewriter, getComponent(), 1, 1));
483  if (memoryInterface.contentEnOpt().has_value()) {
484  // If the memory has a content enable port, it must be asserted when writing.
485  rewriter.create<calyx::AssignOp>(
486  storeOp.getLoc(), memoryInterface.contentEn(),
487  createConstant(storeOp.getLoc(), rewriter, getComponent(), 1, 1));
488  }
489  rewriter.create<calyx::GroupDoneOp>(storeOp.getLoc(), memoryInterface.done());
490 
491  getState<ComponentLoweringState>().registerNonPipelineOperations(storeOp,
492  group);
493 
494  return success();
495 }
496 
497 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
498  MulIOp mul) const {
499  Location loc = mul.getLoc();
500  Type width = mul.getResult().getType(), one = rewriter.getI1Type();
501  auto mulPipe =
502  getState<ComponentLoweringState>()
503  .getNewLibraryOpInstance<calyx::MultPipeLibOp>(
504  rewriter, loc, {one, one, one, width, width, width, one});
505  return buildLibraryBinaryPipeOp<calyx::MultPipeLibOp>(
506  rewriter, mul, mulPipe,
507  /*out=*/mulPipe.getOut());
508 }
509 
510 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
511  DivUIOp div) const {
512  Location loc = div.getLoc();
513  Type width = div.getResult().getType(), one = rewriter.getI1Type();
514  auto divPipe =
515  getState<ComponentLoweringState>()
516  .getNewLibraryOpInstance<calyx::DivUPipeLibOp>(
517  rewriter, loc, {one, one, one, width, width, width, width, one});
518  return buildLibraryBinaryPipeOp<calyx::DivUPipeLibOp>(
519  rewriter, div, divPipe,
520  /*out=*/divPipe.getOut());
521 }
522 
523 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
524  RemUIOp rem) const {
525  Location loc = rem.getLoc();
526  Type width = rem.getResult().getType(), one = rewriter.getI1Type();
527  auto remPipe =
528  getState<ComponentLoweringState>()
529  .getNewLibraryOpInstance<calyx::DivUPipeLibOp>(
530  rewriter, loc, {one, one, one, width, width, width, width, one});
531  return buildLibraryBinaryPipeOp<calyx::DivUPipeLibOp>(
532  rewriter, rem, remPipe,
533  /*out=*/remPipe.getOut());
534 }
535 
536 template <typename TAllocOp>
537 static LogicalResult buildAllocOp(ComponentLoweringState &componentState,
538  PatternRewriter &rewriter, TAllocOp allocOp) {
539  rewriter.setInsertionPointToStart(
540  componentState.getComponentOp().getBodyBlock());
541  MemRefType memtype = allocOp.getType();
542  SmallVector<int64_t> addrSizes;
543  SmallVector<int64_t> sizes;
544  for (int64_t dim : memtype.getShape()) {
545  sizes.push_back(dim);
546  addrSizes.push_back(calyx::handleZeroWidth(dim));
547  }
548  // If the memref has no size (e.g., memref<i32>), create a 1-dimensional memory of
549  // size 1.
550  if (sizes.empty() && addrSizes.empty()) {
551  sizes.push_back(1);
552  addrSizes.push_back(1);
553  }
554  auto memoryOp = rewriter.create<calyx::MemoryOp>(
555  allocOp.getLoc(), componentState.getUniqueName("mem"),
556  memtype.getElementType().getIntOrFloatBitWidth(), sizes, addrSizes);
557  // Externalize memories by default. This makes it easier for the native
558  // compiler to provide initialized memories.
559  memoryOp->setAttr("external",
560  IntegerAttr::get(rewriter.getI1Type(), llvm::APInt(1, 1)));
561  componentState.registerMemoryInterface(allocOp.getResult(),
562  calyx::MemoryInterface(memoryOp));
563  return success();
564 }
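// As a worked example of the mapping above (shapes are hypothetical): a
// `memref.alloc() : memref<4x8xi32>` becomes a `calyx.memory` named something
// like "mem_0", with element width 32, sizes [4, 8] and address widths [2, 3],
// carrying the `external` attribute so the native Calyx compiler can supply
// its initial contents.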
565 
566 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
567  memref::AllocOp allocOp) const {
568  return buildAllocOp(getState<ComponentLoweringState>(), rewriter, allocOp);
569 }
570 
571 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
572  memref::AllocaOp allocOp) const {
573  return buildAllocOp(getState<ComponentLoweringState>(), rewriter, allocOp);
574 }
575 
576 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
577  LoopScheduleTerminatorOp term) const {
578  if (term.getOperands().size() == 0)
579  return success();
580 
581  // Replace the pipeline's result(s) with the terminator's results.
582  auto *pipeline = term->getParentOp();
583  for (size_t i = 0, e = pipeline->getNumResults(); i < e; ++i)
584  pipeline->getResult(i).replaceAllUsesWith(term.getResults()[i]);
585 
586  return success();
587 }
588 
589 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
590  BranchOpInterface brOp) const {
591  /// Branch argument passing group creation
592  /// Branch operands are passed through registers. In BuildBasicBlockRegs we
593  /// created registers for all branch arguments of each block. We now
594  /// create groups for assigning values to these registers.
595  Block *srcBlock = brOp->getBlock();
596  for (auto succBlock : enumerate(brOp->getSuccessors())) {
597  auto succOperands = brOp.getSuccessorOperands(succBlock.index());
598  if (succOperands.empty())
599  continue;
600  // Create operand passing group
601  std::string groupName = loweringState().blockName(srcBlock) + "_to_" +
602  loweringState().blockName(succBlock.value());
603  auto groupOp = calyx::createGroup<calyx::GroupOp>(rewriter, getComponent(),
604  brOp.getLoc(), groupName);
605  // Fetch block argument registers associated with the basic block
606  auto dstBlockArgRegs =
607  getState<ComponentLoweringState>().getBlockArgRegs(succBlock.value());
608  // Create register assignment for each block argument
609  for (auto arg : enumerate(succOperands.getForwardedOperands())) {
610  auto reg = dstBlockArgRegs[arg.index()];
611  calyx::buildAssignmentsForRegisterWrite(
612  rewriter, groupOp,
613  getState<ComponentLoweringState>().getComponentOp(), reg,
614  arg.value());
615  }
616  /// Register the group as a block argument group, to be executed
617  /// when entering the successor block from this block (srcBlock).
618  getState<ComponentLoweringState>().addBlockArgGroup(
619  srcBlock, succBlock.value(), groupOp);
620  }
621  return success();
622 }
623 
624 /// For each return statement, we create a new group for assigning to the
625 /// previously created return value registers.
626 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
627  ReturnOp retOp) const {
628  if (retOp.getNumOperands() == 0)
629  return success();
630 
631  std::string groupName =
632  getState<ComponentLoweringState>().getUniqueName("ret_assign");
633  auto groupOp = calyx::createGroup<calyx::GroupOp>(rewriter, getComponent(),
634  retOp.getLoc(), groupName);
635  for (auto op : enumerate(retOp.getOperands())) {
636  auto reg = getState<ComponentLoweringState>().getReturnReg(op.index());
637  calyx::buildAssignmentsForRegisterWrite(
638  rewriter, groupOp, getState<ComponentLoweringState>().getComponentOp(),
639  reg, op.value());
640  }
641  /// Schedule the group for execution when executing the return op's block.
642  getState<ComponentLoweringState>().addBlockScheduleable(retOp->getBlock(),
643  groupOp);
644  return success();
645 }
646 
647 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
648  arith::ConstantOp constOp) const {
649  /// Move constant operations to the compOp body as hw::ConstantOp's.
650  APInt value;
651  calyx::matchConstantOp(constOp, value);
652  auto hwConstOp = rewriter.replaceOpWithNewOp<hw::ConstantOp>(constOp, value);
653  hwConstOp->moveAfter(getComponent().getBodyBlock(),
654  getComponent().getBodyBlock()->begin());
655  return success();
656 }
657 
658 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
659  AddIOp op) const {
660  return buildLibraryOp<calyx::CombGroupOp, calyx::AddLibOp>(rewriter, op);
661 }
662 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
663  SubIOp op) const {
664  return buildLibraryOp<calyx::CombGroupOp, calyx::SubLibOp>(rewriter, op);
665 }
666 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
667  ShRUIOp op) const {
668  return buildLibraryOp<calyx::CombGroupOp, calyx::RshLibOp>(rewriter, op);
669 }
670 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
671  ShRSIOp op) const {
672  return buildLibraryOp<calyx::CombGroupOp, calyx::SrshLibOp>(rewriter, op);
673 }
674 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
675  ShLIOp op) const {
676  return buildLibraryOp<calyx::CombGroupOp, calyx::LshLibOp>(rewriter, op);
677 }
678 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
679  AndIOp op) const {
680  return buildLibraryOp<calyx::CombGroupOp, calyx::AndLibOp>(rewriter, op);
681 }
682 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
683  OrIOp op) const {
684  return buildLibraryOp<calyx::CombGroupOp, calyx::OrLibOp>(rewriter, op);
685 }
686 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
687  XOrIOp op) const {
688  return buildLibraryOp<calyx::CombGroupOp, calyx::XorLibOp>(rewriter, op);
689 }
690 
691 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
692  CmpIOp op) const {
693  switch (op.getPredicate()) {
694  case CmpIPredicate::eq:
695  return buildLibraryOp<calyx::CombGroupOp, calyx::EqLibOp>(rewriter, op);
696  case CmpIPredicate::ne:
697  return buildLibraryOp<calyx::CombGroupOp, calyx::NeqLibOp>(rewriter, op);
698  case CmpIPredicate::uge:
699  return buildLibraryOp<calyx::CombGroupOp, calyx::GeLibOp>(rewriter, op);
700  case CmpIPredicate::ult:
701  return buildLibraryOp<calyx::CombGroupOp, calyx::LtLibOp>(rewriter, op);
702  case CmpIPredicate::ugt:
703  return buildLibraryOp<calyx::CombGroupOp, calyx::GtLibOp>(rewriter, op);
704  case CmpIPredicate::ule:
705  return buildLibraryOp<calyx::CombGroupOp, calyx::LeLibOp>(rewriter, op);
706  case CmpIPredicate::sge:
707  return buildLibraryOp<calyx::CombGroupOp, calyx::SgeLibOp>(rewriter, op);
708  case CmpIPredicate::slt:
709  return buildLibraryOp<calyx::CombGroupOp, calyx::SltLibOp>(rewriter, op);
710  case CmpIPredicate::sgt:
711  return buildLibraryOp<calyx::CombGroupOp, calyx::SgtLibOp>(rewriter, op);
712  case CmpIPredicate::sle:
713  return buildLibraryOp<calyx::CombGroupOp, calyx::SleLibOp>(rewriter, op);
714  }
715  llvm_unreachable("unsupported comparison predicate");
716 }
717 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
718  TruncIOp op) const {
719  return buildLibraryOp<calyx::CombGroupOp, calyx::SliceLibOp>(
720  rewriter, op, {op.getOperand().getType()}, {op.getType()});
721 }
722 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
723  ExtUIOp op) const {
724  return buildLibraryOp<calyx::CombGroupOp, calyx::PadLibOp>(
725  rewriter, op, {op.getOperand().getType()}, {op.getType()});
726 }
727 
728 LogicalResult BuildOpGroups::buildOp(PatternRewriter &rewriter,
729  IndexCastOp op) const {
730  Type sourceType = calyx::convIndexType(rewriter, op.getOperand().getType());
731  Type targetType = calyx::convIndexType(rewriter, op.getResult().getType());
732  unsigned targetBits = targetType.getIntOrFloatBitWidth();
733  unsigned sourceBits = sourceType.getIntOrFloatBitWidth();
734  LogicalResult res = success();
735 
736  if (targetBits == sourceBits) {
737  /// Drop the index cast and replace uses of the target value with the source
738  /// value.
739  op.getResult().replaceAllUsesWith(op.getOperand());
740  } else {
741  /// pad/slice the source operand.
742  if (sourceBits > targetBits)
743  res = buildLibraryOp<calyx::CombGroupOp, calyx::SliceLibOp>(
744  rewriter, op, {sourceType}, {targetType});
745  else
746  res = buildLibraryOp<calyx::CombGroupOp, calyx::PadLibOp>(
747  rewriter, op, {sourceType}, {targetType});
748  }
749  rewriter.eraseOp(op);
750  return res;
751 }
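// For example (assuming convIndexType maps `index` to i32, as is usual for
// these lowerings): casting an i32 value to `index` simply drops the cast and
// forwards the operand, an index-to-i8 truncation goes through
// calyx::SliceLibOp, and an i8-to-index extension goes through
// calyx::PadLibOp.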
752 
753 /// Creates a new Calyx component for each FuncOp in the program.
754 struct FuncOpConversion : calyx::FuncOpPartialLoweringPattern {
755  using FuncOpPartialLoweringPattern::FuncOpPartialLoweringPattern;
756 
757  LogicalResult
758  partiallyLowerFuncToComp(FuncOp funcOp,
759  PatternRewriter &rewriter) const override {
760  /// Maintain a mapping between funcOp input arguments and the port index
761  /// which the argument will eventually map to.
762  DenseMap<Value, unsigned> funcOpArgRewrites;
763 
764  /// Maintain a mapping between funcOp output indexes and the component
765  /// output port index which the return value will eventually map to.
766  DenseMap<unsigned, unsigned> funcOpResultMapping;
767 
768  /// Maintain a mapping between an external memory argument (identified by a
769  /// memref) and eventual component input- and output port indices that will
770  /// map to the memory ports. The pair denotes the start index of the memory
771  /// ports in the in- and output ports of the component. Ports are expected
772  /// to be ordered in the same manner as they are added by
773  /// calyx::appendPortsForExternalMemref.
774  DenseMap<Value, std::pair<unsigned, unsigned>> extMemoryCompPortIndices;
775 
776  /// Create I/O ports. Maintain separate in/out port vectors to determine
777  /// which port index each function argument will eventually map to.
778  SmallVector<calyx::PortInfo> inPorts, outPorts;
779  FunctionType funcType = funcOp.getFunctionType();
780  unsigned extMemCounter = 0;
781  for (auto arg : enumerate(funcOp.getArguments())) {
782  if (isa<MemRefType>(arg.value().getType())) {
783  /// External memories
784  auto memName =
785  "ext_mem" + std::to_string(extMemoryCompPortIndices.size());
786  extMemoryCompPortIndices[arg.value()] = {inPorts.size(),
787  outPorts.size()};
788  calyx::appendPortsForExternalMemref(rewriter, memName, arg.value(),
789  extMemCounter++, inPorts, outPorts);
790  } else {
791  /// Single-port arguments
792  auto inName = "in" + std::to_string(arg.index());
793  funcOpArgRewrites[arg.value()] = inPorts.size();
794  inPorts.push_back(calyx::PortInfo{
795  rewriter.getStringAttr(inName),
796  calyx::convIndexType(rewriter, arg.value().getType()),
797  calyx::Direction::Input,
798  DictionaryAttr::get(rewriter.getContext(), {})});
799  }
800  }
801  for (auto res : enumerate(funcType.getResults())) {
802  funcOpResultMapping[res.index()] = outPorts.size();
803  outPorts.push_back(calyx::PortInfo{
804  rewriter.getStringAttr("out" + std::to_string(res.index())),
805  calyx::convIndexType(rewriter, res.value()), calyx::Direction::Output,
806  DictionaryAttr::get(rewriter.getContext(), {})});
807  }
808 
809  /// We've now recorded all necessary indices. Merge in- and output ports
810  /// and add the required mandatory component ports.
811  auto ports = inPorts;
812  llvm::append_range(ports, outPorts);
813  calyx::addMandatoryComponentPorts(rewriter, ports);
814 
815  /// Create a calyx::ComponentOp corresponding to the to-be-lowered function.
816  auto compOp = rewriter.create<calyx::ComponentOp>(
817  funcOp.getLoc(), rewriter.getStringAttr(funcOp.getSymName()), ports);
818 
819  /// Mark this component as the toplevel.
820  compOp->setAttr("toplevel", rewriter.getUnitAttr());
821 
822  /// Store the function-to-component mapping.
823  functionMapping[funcOp] = compOp;
824  auto *compState = loweringState().getState<ComponentLoweringState>(compOp);
825  compState->setFuncOpResultMapping(funcOpResultMapping);
826 
827  /// Rewrite funcOp SSA argument values to the CompOp arguments.
828  for (auto &mapping : funcOpArgRewrites)
829  mapping.getFirst().replaceAllUsesWith(
830  compOp.getArgument(mapping.getSecond()));
831 
832  /// Register external memories
833  for (auto extMemPortIndices : extMemoryCompPortIndices) {
834  /// Create a mapping for the in- and output ports using the Calyx memory
835  /// port structure.
836  calyx::MemoryPortsImpl extMemPorts;
837  unsigned inPortsIt = extMemPortIndices.getSecond().first;
838  unsigned outPortsIt = extMemPortIndices.getSecond().second +
839  compOp.getInputPortInfo().size();
840  extMemPorts.readData = compOp.getArgument(inPortsIt++);
841  extMemPorts.done = compOp.getArgument(inPortsIt);
842  extMemPorts.writeData = compOp.getArgument(outPortsIt++);
843  unsigned nAddresses =
844  cast<MemRefType>(extMemPortIndices.getFirst().getType())
845  .getShape()
846  .size();
847  for (unsigned j = 0; j < nAddresses; ++j)
848  extMemPorts.addrPorts.push_back(compOp.getArgument(outPortsIt++));
849  extMemPorts.writeEn = compOp.getArgument(outPortsIt);
850 
851  /// Register the external memory ports as a memory interface within the
852  /// component.
853  compState->registerMemoryInterface(extMemPortIndices.getFirst(),
854  calyx::MemoryInterface(extMemPorts));
855  }
856 
857  return success();
858  }
859 };
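// A sketch of the resulting component signature for a hypothetical function
// `func.func @dot(%a: memref<8xi32>, %n: i32) -> i32`: %n becomes an input
// port "in1", the external memory %a contributes input ports such as
// ext_mem0_read_data and ext_mem0_done plus output ports such as
// ext_mem0_write_data, ext_mem0_addr0 and ext_mem0_write_en, the result
// becomes output port "out0", and calyx::addMandatoryComponentPorts appends
// the usual clk/reset/go inputs and the done output. The exact port names
// come from calyx::appendPortsForExternalMemref and are only illustrative
// here.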
860 
861 /// In BuildWhileGroups, a register is created for each iteration argument of
862 /// the while op. These registers are written to both by the while op's
863 /// terminating yield operation and by init groups scheduled before the while
864 /// op executes; the latter set the initial values of the argument registers.
865 struct BuildWhileGroups : calyx::FuncOpPartialLoweringPattern {
866  using FuncOpPartialLoweringPattern::FuncOpPartialLoweringPattern;
867 
868  LogicalResult
869  partiallyLowerFuncToComp(FuncOp funcOp,
870  PatternRewriter &rewriter) const override {
871  LogicalResult res = success();
872  funcOp.walk([&](Operation *op) {
873  if (!isa<LoopSchedulePipelineOp>(op))
874  return WalkResult::advance();
875 
876  PipelineWhileOp whileOp(cast<LoopSchedulePipelineOp>(op));
877 
878  getState<ComponentLoweringState>().setUniqueName(whileOp.getOperation(),
879  "while");
880 
881  /// Create iteration argument registers.
882  /// The iteration argument registers will be referenced:
883  /// - In the "before" part of the while loop, calculating the conditional,
884  /// - In the "after" part of the while loop,
885  /// - Outside the while loop, rewriting the while loop return values.
886  for (auto arg : enumerate(whileOp.getBodyArgs())) {
887  std::string name = getState<ComponentLoweringState>()
888  .getUniqueName(whileOp.getOperation())
889  .str() +
890  "_arg" + std::to_string(arg.index());
891  auto reg =
892  createRegister(arg.value().getLoc(), rewriter, getComponent(),
893  arg.value().getType().getIntOrFloatBitWidth(), name);
894  getState<ComponentLoweringState>().addLoopIterReg(whileOp, reg,
895  arg.index());
896  arg.value().replaceAllUsesWith(reg.getOut());
897 
898  /// Also replace uses in the "before" region of the while loop
899  whileOp.getConditionBlock()
900  ->getArgument(arg.index())
901  .replaceAllUsesWith(reg.getOut());
902  }
903 
904  /// Create iter args initial value assignment group(s), one per register.
905  SmallVector<calyx::GroupOp> initGroups;
906  auto numOperands = whileOp.getOperation()->getNumOperands();
907  for (size_t i = 0; i < numOperands; ++i) {
908  auto initGroupOp =
909  getState<ComponentLoweringState>().buildLoopIterArgAssignments(
910  rewriter, whileOp,
911  getState<ComponentLoweringState>().getComponentOp(),
912  getState<ComponentLoweringState>().getUniqueName(
913  whileOp.getOperation()) +
914  "_init_" + std::to_string(i),
915  whileOp.getOperation()->getOpOperand(i));
916  initGroups.push_back(initGroupOp);
917  }
918 
919  /// Add the while op to the list of scheduleable things in the current
920  /// block.
921  getState<ComponentLoweringState>().addBlockScheduleable(
922  whileOp.getOperation()->getBlock(), PipelineScheduleable{
923  whileOp,
924  initGroups,
925  });
926  return WalkResult::advance();
927  });
928  return res;
929  }
930 };
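// For a pipeline uniquified as "while" with two iteration arguments, this
// pattern would therefore create argument registers named along the lines of
// while_arg0 and while_arg1 plus init groups while_init_0 and while_init_1
// (the exact names come from getUniqueName/createRegister and are shown only
// as an illustration); the pipeline body and condition then read the
// registers' out ports instead of the original block arguments.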
931 
932 /// Builds registers for each pipeline stage in the program.
933 struct BuildPipelineRegs : calyx::FuncOpPartialLoweringPattern {
934  using FuncOpPartialLoweringPattern::FuncOpPartialLoweringPattern;
935 
936  LogicalResult
937  partiallyLowerFuncToComp(FuncOp funcOp,
938  PatternRewriter &rewriter) const override {
939  funcOp.walk([&](LoopScheduleRegisterOp op) {
940  // Condition registers are handled in BuildWhileGroups.
941  auto *parent = op->getParentOp();
942  auto stage = dyn_cast<LoopSchedulePipelineStageOp>(parent);
943  if (!stage)
944  return;
945 
946  // Create a register for each stage.
947  for (auto &operand : op->getOpOperands()) {
948  unsigned i = operand.getOperandNumber();
949  // Iter args are created in BuildWhileGroups, so just mark the iter arg
950  // register as the appropriate pipeline register.
951  Value stageResult = stage.getResult(i);
952  bool isIterArg = false;
953  for (auto &use : stageResult.getUses()) {
954  if (auto term = dyn_cast<LoopScheduleTerminatorOp>(use.getOwner())) {
955  if (use.getOperandNumber() < term.getIterArgs().size()) {
956  PipelineWhileOp whileOp(
957  dyn_cast<LoopSchedulePipelineOp>(stage->getParentOp()));
958  auto reg = getState<ComponentLoweringState>().getLoopIterReg(
959  whileOp, use.getOperandNumber());
960  getState<ComponentLoweringState>().addPipelineReg(stage, reg, i);
961  isIterArg = true;
962  }
963  }
964  }
965  if (isIterArg)
966  continue;
967 
968  // Create a register for passing this result to later stages.
969  Value value = operand.get();
970  Type resultType = value.getType();
971  assert(isa<IntegerType>(resultType) &&
972  "unsupported pipeline result type");
973  auto name = SmallString<20>("stage_");
974  name += std::to_string(stage.getStageNumber());
975  name += "_register_";
976  name += std::to_string(i);
977  unsigned width = resultType.getIntOrFloatBitWidth();
978  auto reg = createRegister(value.getLoc(), rewriter, getComponent(),
979  width, name);
980  getState<ComponentLoweringState>().addPipelineReg(stage, reg, i);
981 
982  // Note that we do not use replace all uses with here as in
983  // BuildBasicBlockRegs. Instead, we wait until after BuildOpGroups, and
984  // replace all uses inside BuildPipelineGroups, once the pipeline
985  // register created here has been assigned to.
986  }
987  });
988  return success();
989  }
990 };
991 
992 /// Builds groups for assigning registers for pipeline stages.
993 struct BuildPipelineGroups : calyx::FuncOpPartialLoweringPattern {
994  using FuncOpPartialLoweringPattern::FuncOpPartialLoweringPattern;
995 
996  LogicalResult
997  partiallyLowerFuncToComp(FuncOp funcOp,
998  PatternRewriter &rewriter) const override {
999  for (auto pipeline : funcOp.getOps<LoopSchedulePipelineOp>())
1000  for (auto stage :
1001  pipeline.getStagesBlock().getOps<LoopSchedulePipelineStageOp>())
1002  if (failed(buildStageGroups(pipeline, stage, rewriter)))
1003  return failure();
1004 
1005  return success();
1006  }
1007 
1008  LogicalResult buildStageGroups(LoopSchedulePipelineOp whileOp,
1009  LoopSchedulePipelineStageOp stage,
1010  PatternRewriter &rewriter) const {
1011  // Collect pipeline registers for stage.
1012  auto pipelineRegisters =
1013  getState<ComponentLoweringState>().getPipelineRegs(stage);
1014  // Get the number of pipeline stages in the stages block, excluding the
1015  // terminator. The verifier guarantees there is at least one stage followed
1016  // by a terminator.
1017  size_t numStages = whileOp.getStagesBlock().getOperations().size() - 1;
1018  assert(numStages > 0);
1019 
1020  // Collect group names for the prologue or epilogue.
1021  SmallVector<StringAttr> prologueGroups, epilogueGroups;
1022 
1023  auto updatePrologueAndEpilogue = [&](calyx::GroupOp group) {
1024  // Mark the group for scheduling in the pipeline's block.
1025  getState<ComponentLoweringState>().addBlockScheduleable(stage->getBlock(),
1026  group);
1027 
1028  // Add the group to the prologue or epilogue for this stage as
1029  // necessary. The goal is to fill the pipeline so it will be in steady
1030  // state after the prologue, and drain the pipeline from steady state in
1031  // the epilogue. Every stage but the last should have its groups in the
1032  // prologue, and every stage but the first should have its groups in the
1033  // epilogue.
1034  unsigned stageNumber = stage.getStageNumber();
1035  if (stageNumber < numStages - 1)
1036  prologueGroups.push_back(group.getSymNameAttr());
1037  if (stageNumber > 0)
1038  epilogueGroups.push_back(group.getSymNameAttr());
1039  };
1040 
1041  MutableArrayRef<OpOperand> operands =
1042  stage.getBodyBlock().getTerminator()->getOpOperands();
1043  bool isStageWithNoPipelinedValues =
1044  operands.empty() && !stage.getBodyBlock().empty();
1045  if (isStageWithNoPipelinedValues) {
1046  // Covers the case where there are no values that need to be passed
1047  // through to the next stage, e.g., some intermediary store.
1048  for (auto &op : stage.getBodyBlock())
1049  if (auto group = getState<ComponentLoweringState>()
1050  .getNonPipelinedGroupFrom<calyx::GroupOp>(&op))
1051  updatePrologueAndEpilogue(*group);
1052  }
1053 
1054  for (auto &operand : operands) {
1055  unsigned i = operand.getOperandNumber();
1056  Value value = operand.get();
1057 
1058  // Get the pipeline register for that result.
1059  auto pipelineRegister = pipelineRegisters[i];
1060 
1061  // Get the evaluating group for that value.
1062  calyx::GroupInterface evaluatingGroup =
1063  getState<ComponentLoweringState>().getEvaluatingGroup(value);
1064 
1065  // Remember the final group for this stage result.
1066  calyx::GroupOp group;
1067 
1068  // Stitch the register in, depending on whether the group was
1069  // combinational or sequential.
1070  if (auto combGroup =
1071  dyn_cast<calyx::CombGroupOp>(evaluatingGroup.getOperation()))
1072  group =
1073  convertCombToSeqGroup(combGroup, pipelineRegister, value, rewriter);
1074  else
1075  group =
1076  replaceGroupRegister(evaluatingGroup, pipelineRegister, rewriter);
1077 
1078  // Replace the stage result uses with the register out.
1079  stage.getResult(i).replaceAllUsesWith(pipelineRegister.getOut());
1080 
1081  updatePrologueAndEpilogue(group);
1082  }
1083 
1084  // Append the stage to the prologue or epilogue list of stages if any groups
1085  // were added for this stage. We append a list of groups for each stage, so
1086  // we can group by stage later, when we generate the schedule.
1087  if (!prologueGroups.empty())
1088  getState<ComponentLoweringState>().addPipelinePrologue(whileOp,
1089  prologueGroups);
1090  if (!epilogueGroups.empty())
1091  getState<ComponentLoweringState>().addPipelineEpilogue(whileOp,
1092  epilogueGroups);
1093 
1094  return success();
1095  }
1096 
1097  calyx::GroupOp convertCombToSeqGroup(calyx::CombGroupOp combGroup,
1098  calyx::RegisterOp pipelineRegister,
1099  Value value,
1100  PatternRewriter &rewriter) const {
1101  // Create a sequential group and replace the comb group.
1102  PatternRewriter::InsertionGuard g(rewriter);
1103  rewriter.setInsertionPoint(combGroup);
1104  auto group = rewriter.create<calyx::GroupOp>(combGroup.getLoc(),
1105  combGroup.getName());
1106  rewriter.cloneRegionBefore(combGroup.getBodyRegion(),
1107  &group.getBody().front());
1108  group.getBodyRegion().back().erase();
1109  rewriter.eraseOp(combGroup);
1110 
1111  // Stitch evaluating group to register.
1112  calyx::buildAssignmentsForRegisterWrite(
1113  rewriter, group, getState<ComponentLoweringState>().getComponentOp(),
1114  pipelineRegister, value);
1115 
1116  // Mark the new group as the evaluating group.
1117  for (auto assign : group.getOps<calyx::AssignOp>())
1118  getState<ComponentLoweringState>().registerEvaluatingGroup(
1119  assign.getSrc(), group);
1120 
1121  return group;
1122  }
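  // Rough before/after picture for the conversion above (group, cell and
  // register names are invented): a combinational group
  //
  //   calyx.comb_group @bb0_0 { calyx.assign %add.left = ... }
  //
  // becomes a sequential group of the same name that keeps those assignments
  // and additionally latches the value into the stage's pipeline register:
  //
  //   calyx.group @bb0_0 {
  //     calyx.assign %add.left = ...
  //     calyx.assign %stage_0_register_0.in = %add.out
  //     calyx.assign %stage_0_register_0.write_en = %c1_i1
  //     calyx.group_done %stage_0_register_0.done
  //   }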
1123 
1124  calyx::GroupOp replaceGroupRegister(calyx::GroupInterface evaluatingGroup,
1125  calyx::RegisterOp pipelineRegister,
1126  PatternRewriter &rewriter) const {
1127  auto group = cast<calyx::GroupOp>(evaluatingGroup.getOperation());
1128 
1129  // Get the group and register that is temporarily being written to.
1130  auto doneOp = group.getDoneOp();
1131  auto tempReg =
1132  cast<calyx::RegisterOp>(cast<OpResult>(doneOp.getSrc()).getOwner());
1133  auto tempIn = tempReg.getIn();
1134  auto tempWriteEn = tempReg.getWriteEn();
1135 
1136  // Replace the register write with a write to the pipeline register.
1137  for (auto assign : group.getOps<calyx::AssignOp>()) {
1138  if (assign.getDest() == tempIn)
1139  assign.getDestMutable().assign(pipelineRegister.getIn());
1140  else if (assign.getDest() == tempWriteEn)
1141  assign.getDestMutable().assign(pipelineRegister.getWriteEn());
1142  }
1143  doneOp.getSrcMutable().assign(pipelineRegister.getDone());
1144 
1145  // Remove the old register completely.
1146  rewriter.eraseOp(tempReg);
1147 
1148  return group;
1149  }
1150 };
1151 
1152 /// Builds a control schedule by traversing the CFG of the function and
1153 /// associating this with the previously created groups.
1154 /// For simplicity, the generated control flow is expanded for all possible
1155 /// paths in the input DAG. This elaborated control flow is later reduced in
1156 /// the runControlFlowSimplification passes.
1157 struct BuildControl : calyx::FuncOpPartialLoweringPattern {
1158  using FuncOpPartialLoweringPattern::FuncOpPartialLoweringPattern;
1159 
1160  LogicalResult
1161  partiallyLowerFuncToComp(FuncOp funcOp,
1162  PatternRewriter &rewriter) const override {
1163  auto *entryBlock = &funcOp.getBlocks().front();
1164  rewriter.setInsertionPointToStart(
1165  getComponent().getControlOp().getBodyBlock());
1166  auto topLevelSeqOp = rewriter.create<calyx::SeqOp>(funcOp.getLoc());
1167  DenseSet<Block *> path;
1168  return buildCFGControl(path, rewriter, topLevelSeqOp.getBodyBlock(),
1169  nullptr, entryBlock);
1170  }
1171 
1172 private:
1173  /// Sequentially schedules the groups that registered themselves with
1174  /// 'block'.
1175  LogicalResult scheduleBasicBlock(PatternRewriter &rewriter,
1176  const DenseSet<Block *> &path,
1177  mlir::Block *parentCtrlBlock,
1178  mlir::Block *block) const {
1179  auto compBlockScheduleables =
1180  getState<ComponentLoweringState>().getBlockScheduleables(block);
1181  auto loc = block->front().getLoc();
1182 
1183  if (compBlockScheduleables.size() > 1) {
1184  auto seqOp = rewriter.create<calyx::SeqOp>(loc);
1185  parentCtrlBlock = seqOp.getBodyBlock();
1186  }
1187 
1188  for (auto &group : compBlockScheduleables) {
1189  rewriter.setInsertionPointToEnd(parentCtrlBlock);
1190  if (auto groupPtr = std::get_if<calyx::GroupOp>(&group); groupPtr) {
1191  rewriter.create<calyx::EnableOp>(groupPtr->getLoc(),
1192  groupPtr->getSymName());
1193  } else if (auto *pipeSchedPtr = std::get_if<PipelineScheduleable>(&group);
1194  pipeSchedPtr) {
1195  auto &whileOp = pipeSchedPtr->whileOp;
1196 
1197  auto whileCtrlOp =
1198  buildWhileCtrlOp(whileOp, pipeSchedPtr->initGroups, rewriter);
1199  rewriter.setInsertionPointToEnd(whileCtrlOp.getBodyBlock());
1200  auto whileBodyOp =
1201  rewriter.create<calyx::ParOp>(whileOp.getOperation()->getLoc());
1202  rewriter.setInsertionPointToEnd(whileBodyOp.getBodyBlock());
1203 
1204  /// Schedule pipeline stages in the parallel group directly.
1205  auto bodyBlockScheduleables =
1206  getState<ComponentLoweringState>().getBlockScheduleables(
1207  whileOp.getBodyBlock());
1208  for (auto &group : bodyBlockScheduleables)
1209  if (auto *groupPtr = std::get_if<calyx::GroupOp>(&group); groupPtr)
1210  rewriter.create<calyx::EnableOp>(groupPtr->getLoc(),
1211  groupPtr->getSymName());
1212  else
1213  return whileOp.getOperation()->emitError(
1214  "Unsupported block schedulable");
1215 
1216  // Add any prologue or epilogue.
1217  PatternRewriter::InsertionGuard g(rewriter);
1218  rewriter.setInsertionPoint(whileCtrlOp);
1219  getState<ComponentLoweringState>().createPipelinePrologue(
1220  whileOp.getOperation(), rewriter);
1221  rewriter.setInsertionPointAfter(whileCtrlOp);
1222  getState<ComponentLoweringState>().createPipelineEpilogue(
1223  whileOp.getOperation(), rewriter);
1224  } else
1225  llvm_unreachable("Unknown scheduleable");
1226  }
1227  return success();
1228  }
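  // Putting the pieces together, the control generated for a pipeline with
  // prologue/epilogue entries ends up shaped roughly like this (a sketch, not
  // verbatim output):
  //
  //   calyx.par { ... }                 // init groups for the iter args
  //   calyx.par { ... }                 // prologue par(s), one per fill step
  //   calyx.while %cond with @cond_group {
  //     calyx.par { ... }               // all stage groups of one iteration
  //   } {bound = tripCount - prologueDepth}
  //   calyx.par { ... }                 // epilogue par(s), one per drain step
  //
  // where the bound attribute is only attached when the pipeline carries a
  // static trip count (see buildWhileCtrlOp below).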
1229 
1230  /// Schedules a block by inserting a branch argument assignment block (if any)
1231  /// before recursing into the scheduling of the block innards.
1232  /// Blocks 'from' and 'to' refer to blocks in the source program.
1233  /// parentCtrlBlock refers to the control block wherein control operations are
1234  /// to be inserted.
1235  LogicalResult schedulePath(PatternRewriter &rewriter,
1236  const DenseSet<Block *> &path, Location loc,
1237  Block *from, Block *to,
1238  Block *parentCtrlBlock) const {
1239  /// Schedule any registered block arguments to be executed before the body
1240  /// of the branch.
1241  rewriter.setInsertionPointToEnd(parentCtrlBlock);
1242  auto preSeqOp = rewriter.create<calyx::SeqOp>(loc);
1243  rewriter.setInsertionPointToEnd(preSeqOp.getBodyBlock());
1244  for (auto barg :
1245  getState<ComponentLoweringState>().getBlockArgGroups(from, to))
1246  rewriter.create<calyx::EnableOp>(barg.getLoc(), barg.getSymName());
1247 
1248  return buildCFGControl(path, rewriter, parentCtrlBlock, from, to);
1249  }
1250 
1251  LogicalResult buildCFGControl(DenseSet<Block *> path,
1252  PatternRewriter &rewriter,
1253  mlir::Block *parentCtrlBlock,
1254  mlir::Block *preBlock,
1255  mlir::Block *block) const {
1256  if (path.count(block) != 0)
1257  return preBlock->getTerminator()->emitError()
1258  << "CFG backedge detected. Loops must be raised to 'scf.while' or "
1259  "'scf.for' operations.";
1260 
1261  rewriter.setInsertionPointToEnd(parentCtrlBlock);
1262  LogicalResult bbSchedResult =
1263  scheduleBasicBlock(rewriter, path, parentCtrlBlock, block);
1264  if (bbSchedResult.failed())
1265  return bbSchedResult;
1266 
1267  path.insert(block);
1268  auto successors = block->getSuccessors();
1269  auto nSuccessors = successors.size();
1270  if (nSuccessors > 0) {
1271  auto brOp = dyn_cast<BranchOpInterface>(block->getTerminator());
1272  assert(brOp);
1273  if (nSuccessors > 1) {
1274  /// TODO(mortbopet): we could choose to support ie. std.switch, but it
1275  /// would probably be easier to just require it to be lowered
1276  /// beforehand.
1277  assert(nSuccessors == 2 &&
1278  "only conditional branches supported for now...");
1279  /// Wrap each branch inside an if/else.
1280  auto cond = brOp->getOperand(0);
1281  auto condGroup = getState<ComponentLoweringState>()
1282  .getEvaluatingGroup<calyx::CombGroupOp>(cond);
1283  auto symbolAttr = FlatSymbolRefAttr::get(
1284  StringAttr::get(getContext(), condGroup.getSymName()));
1285 
1286  auto ifOp = rewriter.create<calyx::IfOp>(
1287  brOp->getLoc(), cond, symbolAttr, /*initializeElseBody=*/true);
1288  rewriter.setInsertionPointToStart(ifOp.getThenBody());
1289  auto thenSeqOp = rewriter.create<calyx::SeqOp>(brOp.getLoc());
1290  rewriter.setInsertionPointToStart(ifOp.getElseBody());
1291  auto elseSeqOp = rewriter.create<calyx::SeqOp>(brOp.getLoc());
1292 
1293  bool trueBrSchedSuccess =
1294  schedulePath(rewriter, path, brOp.getLoc(), block, successors[0],
1295  thenSeqOp.getBodyBlock())
1296  .succeeded();
1297  bool falseBrSchedSuccess = true;
1298  if (trueBrSchedSuccess) {
1299  falseBrSchedSuccess =
1300  schedulePath(rewriter, path, brOp.getLoc(), block, successors[1],
1301  elseSeqOp.getBodyBlock())
1302  .succeeded();
1303  }
1304 
1305  return success(trueBrSchedSuccess && falseBrSchedSuccess);
1306  } else {
1307  /// Schedule sequentially within the current parent control block.
1308  return schedulePath(rewriter, path, brOp.getLoc(), block,
1309  successors.front(), parentCtrlBlock);
1310  }
1311  }
1312  return success();
1313  }
1314 
1315  calyx::WhileOp buildWhileCtrlOp(PipelineWhileOp whileOp,
1316  SmallVector<calyx::GroupOp> initGroups,
1317  PatternRewriter &rewriter) const {
1318  Location loc = whileOp.getLoc();
1319  /// Insert while iter arg initialization group(s). Emit a
1320  /// parallel group to assign one or more registers all at once.
1321  {
1322  PatternRewriter::InsertionGuard g(rewriter);
1323  auto parOp = rewriter.create<calyx::ParOp>(loc);
1324  rewriter.setInsertionPointToStart(parOp.getBodyBlock());
1325  for (calyx::GroupOp group : initGroups)
1326  rewriter.create<calyx::EnableOp>(group.getLoc(), group.getName());
1327  }
1328 
1329  /// Insert the while op itself.
1330  auto cond = whileOp.getConditionValue();
1331  auto condGroup = getState<ComponentLoweringState>()
1332  .getEvaluatingGroup<calyx::CombGroupOp>(cond);
1333  auto symbolAttr = FlatSymbolRefAttr::get(
1334  StringAttr::get(getContext(), condGroup.getSymName()));
1335  auto whileCtrlOp = rewriter.create<calyx::WhileOp>(loc, cond, symbolAttr);
1336 
1337  /// If a bound was specified, add it.
1338  if (auto bound = whileOp.getBound()) {
1339  // Subtract the number of iterations unrolled into the prologue.
1340  auto prologue = getState<ComponentLoweringState>().getPipelinePrologue(
1341  whileOp.getOperation());
1342  auto unrolledBound = *bound - prologue.size();
1343  whileCtrlOp->setAttr("bound", rewriter.getI64IntegerAttr(unrolledBound));
1344  }
1345 
1346  return whileCtrlOp;
1347  }
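As a rough sketch (iteration-argument group names are illustrative, and the while body is populated by the calling pattern), the control emitted here for a pipeline with two init groups and a known trip count looks like:

///   calyx.par {
///     calyx.enable @init_iter_arg0_group
///     calyx.enable @init_iter_arg1_group
///   }
///   calyx.while %cond with @cond_comb_group {
///     // pipeline stage control is inserted here by the caller
///   } {bound = trip_count - prologue_size}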
1348 };
1349 
1350 /// LateSSAReplacement contains various functions for replacing SSA values that
1351 /// were not replaced during op construction.
1352 class LateSSAReplacement : public calyx::FuncOpPartialLoweringPattern {
1353  using FuncOpPartialLoweringPattern::FuncOpPartialLoweringPattern;
1354 
1355  LogicalResult partiallyLowerFuncToComp(FuncOp funcOp,
1356  PatternRewriter &) const override {
1357  funcOp.walk([&](memref::LoadOp loadOp) {
1358  if (calyx::singleLoadFromMemory(loadOp)) {
1359  /// In buildOpGroups we did not replace loadOp's results, to ensure a
1360  /// link between evaluating groups (which fix the input addresses of a
1361  /// memory op) and a readData result. Now, we may replace these SSA
1362  /// values with their memoryOp readData output.
1363  loadOp.getResult().replaceAllUsesWith(
1364  getState<ComponentLoweringState>()
1365  .getMemoryInterface(loadOp.getMemref())
1366  .readData());
1367  }
1368  });
1369 
1370  return success();
1371  }
1372 };
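A hedged example of what this replacement achieves (value names are illustrative): for source IR such as

///   %v = memref.load %mem[%i] : memref<64xi32>
///   %r = arith.addi %v, %c1 : i32
///
/// the use of %v in the arith.addi is rewired to the read_data output of the
/// Calyx memory primitive created for %mem, while the group built earlier in
/// BuildOpGroups keeps driving the memory's address ports.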
1373 
1374 /// Erases FuncOp operations.
1375 class CleanupFuncOps : public calyx::FuncOpPartialLoweringPattern {
1376  using FuncOpPartialLoweringPattern::FuncOpPartialLoweringPattern;
1377 
1378  LogicalResult matchAndRewrite(FuncOp funcOp,
1379  PatternRewriter &rewriter) const override {
1380  rewriter.eraseOp(funcOp);
1381  return success();
1382  }
1383 
1384  LogicalResult
1385  partiallyLowerFuncToComp(FuncOp funcOp,
1386  PatternRewriter &rewriter) const override {
1387  return success();
1388  }
1389 };
1390 
1391 //===----------------------------------------------------------------------===//
1392 // Pass driver
1393 //===----------------------------------------------------------------------===//
1394 class LoopScheduleToCalyxPass
1395  : public circt::impl::LoopScheduleToCalyxBase<LoopScheduleToCalyxPass> {
1396 public:
1397  LoopScheduleToCalyxPass()
1398  : LoopScheduleToCalyxBase<LoopScheduleToCalyxPass>(),
1399  partialPatternRes(success()) {}
1400  void runOnOperation() override;
1401 
1402  LogicalResult setTopLevelFunction(mlir::ModuleOp moduleOp,
1403  std::string &topLevelFunction) {
1404  if (!topLevelFunctionOpt.empty()) {
1405  if (SymbolTable::lookupSymbolIn(moduleOp, topLevelFunctionOpt) ==
1406  nullptr) {
1407  moduleOp.emitError() << "Top level function '" << topLevelFunctionOpt
1408  << "' not found in module.";
1409  return failure();
1410  }
1411  topLevelFunction = topLevelFunctionOpt;
1412  } else {
1413  /// No top level function set; infer the top level if the module contains
1414  /// a single function, otherwise emit an error.
1415  auto funcOps = moduleOp.getOps<FuncOp>();
1416  if (std::distance(funcOps.begin(), funcOps.end()) == 1)
1417  topLevelFunction = (*funcOps.begin()).getSymName().str();
1418  else {
1419  moduleOp.emitError()
1420  << "Module contains multiple functions, but no top level "
1421  "function was set. Please see --top-level-function";
1422  return failure();
1423  }
1424  }
1425  return success();
1426  }
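To make the two accepted situations concrete (a hedged sketch; the function names are made up):

///   // (a) Single function: inferred as the top level automatically.
///   module { func.func @kernel(%a: i32) -> i32 { ... } }
///
///   // (b) Multiple functions: the top level must be chosen explicitly via
///   //     the top-level-function option referenced in the error above.
///   module {
///     func.func @helper(%a: i32) -> i32 { ... }
///     func.func @main() -> i32 { ... }
///   }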
1427 
1428  struct LoweringPattern {
1429  enum class Strategy { Once, Greedy };
1430  RewritePatternSet pattern;
1431  Strategy strategy;
1432  };
1433 
1434  /// Labels the entry point of a Calyx program.
1435  /// Furthermore, this function performs validation on the input function,
1436  /// to ensure that we've implemented the capabilities necessary to convert
1437  /// it.
1438  LogicalResult labelEntryPoint(StringRef topLevelFunction) {
1439  // Program legalization - the partial conversion driver will not run
1440  // unless some pattern is provided - provide a dummy pattern.
1441  struct DummyPattern : public OpRewritePattern<mlir::ModuleOp> {
1442  using OpRewritePattern::OpRewritePattern;
1443  LogicalResult matchAndRewrite(mlir::ModuleOp,
1444  PatternRewriter &) const override {
1445  return failure();
1446  }
1447  };
1448 
1449  ConversionTarget target(getContext());
1450  target.addLegalDialect<calyx::CalyxDialect>();
1451  target.addLegalDialect<scf::SCFDialect>();
1452  target.addIllegalDialect<hw::HWDialect>();
1453  target.addIllegalDialect<comb::CombDialect>();
1454 
1455  // For loops should have been lowered to while loops
1456  target.addIllegalOp<scf::ForOp>();
1457 
1458  // Only accept operations for which we have added lowerings.
1459  target.addIllegalDialect<FuncDialect>();
1460  target.addIllegalDialect<ArithDialect>();
1461  target.addLegalOp<AddIOp, SubIOp, CmpIOp, ShLIOp, ShRUIOp, ShRSIOp, AndIOp,
1462  XOrIOp, OrIOp, ExtUIOp, TruncIOp, CondBranchOp, BranchOp,
1463  MulIOp, DivUIOp, DivSIOp, RemUIOp, RemSIOp, ReturnOp,
1464  arith::ConstantOp, IndexCastOp, FuncOp, ExtSIOp>();
1465 
1466  RewritePatternSet legalizePatterns(&getContext());
1467  legalizePatterns.add<DummyPattern>(&getContext());
1468  DenseSet<Operation *> legalizedOps;
1469  if (applyPartialConversion(getOperation(), target,
1470  std::move(legalizePatterns))
1471  .failed())
1472  return failure();
1473 
1474  // Program conversion
1475  return calyx::applyModuleOpConversion(getOperation(), topLevelFunction);
1476  }
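As a consequence of the conversion target above, a hedged example of input that this legalization step rejects:

///   scf.for %i = %lb to %ub step %step { ... }
///
/// Such loops must first be rewritten to scf.while form (or expressed as a
/// LoopSchedule pipeline) before running this pass.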
1477 
1478  /// 'Once' patterns are expected to take an additional LogicalResult&
1479  /// argument, to forward their result state (greedyPatternRewriteDriver
1480  /// results are skipped for Once patterns).
1481  template <typename TPattern, typename... PatternArgs>
1482  void addOncePattern(SmallVectorImpl<LoweringPattern> &patterns,
1483  PatternArgs &&...args) {
1484  RewritePatternSet ps(&getContext());
1485  ps.add<TPattern>(&getContext(), partialPatternRes, args...);
1486  patterns.push_back(
1487  LoweringPattern{std::move(ps), LoweringPattern::Strategy::Once});
1488  }
1489 
1490  template <typename TPattern, typename... PatternArgs>
1491  void addGreedyPattern(SmallVectorImpl<LoweringPattern> &patterns,
1492  PatternArgs &&...args) {
1493  RewritePatternSet ps(&getContext());
1494  ps.add<TPattern>(&getContext(), args...);
1495  patterns.push_back(
1496  LoweringPattern{std::move(ps), LoweringPattern::Strategy::Greedy});
1497  }
1498 
1499  LogicalResult runPartialPattern(RewritePatternSet &pattern, bool runOnce) {
1500  assert(pattern.getNativePatterns().size() == 1 &&
1501  "Should only apply 1 partial lowering pattern at once");
1502 
1503  // During component creation, the function body is inlined into the
1504  // component body for further processing. However, proper control flow
1505  // will only be established later in the conversion process, so ensure
1506  // that rewriter optimizations (especially DCE) are disabled.
1507  GreedyRewriteConfig config;
1508  config.enableRegionSimplification =
1509  mlir::GreedySimplifyRegionLevel::Disabled;
1510  if (runOnce)
1511  config.maxIterations = 1;
1512 
1513  /// We cannot return the result of applyPatternsAndFoldGreedily here: the
1514  /// root is not necessarily erased, so it will always report failure.
1515  /// Instead, forward the 'succeeded' value from PartialLoweringPatternBase.
1516  (void)applyPatternsAndFoldGreedily(getOperation(), std::move(pattern),
1517  config);
1518  return partialPatternRes;
1519  }
1520 
1521 private:
1522  LogicalResult partialPatternRes;
1523  std::shared_ptr<calyx::CalyxLoweringState> loweringState = nullptr;
1524 };
1525 
1526 void LoopScheduleToCalyxPass::runOnOperation() {
1527  // Clear internal state. See https://github.com/llvm/circt/issues/3235
1528  loweringState.reset();
1529  partialPatternRes = LogicalResult::failure();
1530 
1531  std::string topLevelFunction;
1532  if (failed(setTopLevelFunction(getOperation(), topLevelFunction))) {
1533  signalPassFailure();
1534  return;
1535  }
1536 
1537  /// Start conversion
1538  if (failed(labelEntryPoint(topLevelFunction))) {
1539  signalPassFailure();
1540  return;
1541  }
1542  loweringState = std::make_shared<calyx::CalyxLoweringState>(getOperation(),
1543  topLevelFunction);
1544 
1545  /// --------------------------------------------------------------------------
1546  /// If you are a developer, it may be helpful to add a
1547  /// 'getOperation()->dump()' call after the execution of each stage to
1548  /// view the transformations that are going on.
1549  /// --------------------------------------------------------------------------
1550 
1551  /// A mapping is maintained between a function operation and its corresponding
1552  /// Calyx component.
1553  DenseMap<FuncOp, calyx::ComponentOp> funcMap;
1554  SmallVector<LoweringPattern, 8> loweringPatterns;
1555  calyx::PatternApplicationState patternState;
1556 
1557  /// Creates a new Calyx component for each FuncOp in the input module.
1558  addOncePattern<FuncOpConversion>(loweringPatterns, patternState, funcMap,
1559  *loweringState);
1560 
1561  /// This pattern converts all index typed values to an i32 integer.
1562  addOncePattern<calyx::ConvertIndexTypes>(loweringPatterns, patternState,
1563  funcMap, *loweringState);
1564 
1565  /// This pattern creates registers for all basic-block arguments.
1566  addOncePattern<calyx::BuildBasicBlockRegs>(loweringPatterns, patternState,
1567  funcMap, *loweringState);
1568 
1569  /// This pattern creates registers for the function return values.
1570  addOncePattern<calyx::BuildReturnRegs>(loweringPatterns, patternState,
1571  funcMap, *loweringState);
1572 
1573  /// This pattern creates registers for iteration arguments of scf.while
1574  /// operations. Additionally, creates a group for assigning the initial
1575  /// value of the iteration argument registers.
1576  addOncePattern<BuildWhileGroups>(loweringPatterns, patternState, funcMap,
1577  *loweringState);
1578 
1579  /// This pattern creates registers for all pipeline stages.
1580  addOncePattern<BuildPipelineRegs>(loweringPatterns, patternState, funcMap,
1581  *loweringState);
1582 
1583  /// This pattern converts operations within basic blocks to Calyx library
1584  /// operators. Combinational operations are assigned inside a
1585  /// calyx::CombGroupOp, and sequential operations inside calyx::GroupOps.
1586  /// Sequential groups are registered with the Block* from which the
1587  /// operation originated. This is used during control schedule generation. By
1588  /// having a distinct group for each operation, groups are analogous to SSA
1589  /// values in the source program.
1590  addOncePattern<BuildOpGroups>(loweringPatterns, patternState, funcMap,
1591  *loweringState);
1592 
1593  /// This pattern creates groups for all pipeline stages.
1594  addOncePattern<BuildPipelineGroups>(loweringPatterns, patternState, funcMap,
1595  *loweringState);
1596 
1597  /// This pattern traverses the CFG of the program and generates a control
1598  /// schedule based on the calyx::GroupOp's which were registered for each
1599  /// basic block in the source function.
1600  addOncePattern<BuildControl>(loweringPatterns, patternState, funcMap,
1601  *loweringState);
1602 
1603  /// This pattern recursively inlines use-def chains of combinational logic (from
1604  /// non-stateful groups) into groups referenced in the control schedule.
1605  addOncePattern<calyx::InlineCombGroups>(loweringPatterns, patternState,
1606  *loweringState);
1607 
1608  /// This pattern performs various SSA replacements that must be done
1609  /// after control generation.
1610  addOncePattern<LateSSAReplacement>(loweringPatterns, patternState, funcMap,
1611  *loweringState);
1612 
1613  /// Eliminate any unused combinational groups. This is done before
1614  /// calyx::RewriteMemoryAccesses to avoid inferring slice components for
1615  /// groups that will be removed.
1616  addGreedyPattern<calyx::EliminateUnusedCombGroups>(loweringPatterns);
1617 
1618  /// This pattern rewrites accesses to memories which are too wide due to
1619  /// index types being converted to a fixed-width integer type.
1620  addOncePattern<calyx::RewriteMemoryAccesses>(loweringPatterns, patternState,
1621  *loweringState);
1622 
1623  /// This pattern removes the source FuncOp which has now been converted into
1624  /// a Calyx component.
1625  addOncePattern<CleanupFuncOps>(loweringPatterns, patternState, funcMap,
1626  *loweringState);
1627 
1628  /// Sequentially apply each lowering pattern.
1629  for (auto &pat : loweringPatterns) {
1630  LogicalResult partialPatternRes = runPartialPattern(
1631  pat.pattern,
1632  /*runOnce=*/pat.strategy == LoweringPattern::Strategy::Once);
1633  if (succeeded(partialPatternRes))
1634  continue;
1635  signalPassFailure();
1636  return;
1637  }
1638 
1639  //===--------------------------------------------------------------------===//
1640  // Cleanup patterns
1641  //===--------------------------------------------------------------------===//
1642  RewritePatternSet cleanupPatterns(&getContext());
1643  cleanupPatterns.add<calyx::MultipleGroupDonePattern,
1644  calyx::NonTerminatingGroupDonePattern>(&getContext());
1645  if (failed(applyPatternsAndFoldGreedily(getOperation(),
1646  std::move(cleanupPatterns)))) {
1647  signalPassFailure();
1648  return;
1649  }
1650 
1651  if (ciderSourceLocationMetadata) {
1652  // Debugging information for the Cider debugger.
1653  // Reference: https://docs.calyxir.org/debug/cider.html
1654  SmallVector<Attribute, 16> sourceLocations;
1655  getOperation()->walk([&](calyx::ComponentOp component) {
1656  return getCiderSourceLocationMetadata(component, sourceLocations);
1657  });
1658 
1659  MLIRContext *context = getOperation()->getContext();
1660  getOperation()->setAttr("calyx.metadata",
1661  ArrayAttr::get(context, sourceLocations));
1662  }
1663 }
1664 
1665 } // namespace pipelinetocalyx
1666 
1667 //===----------------------------------------------------------------------===//
1668 // Pass initialization
1669 //===----------------------------------------------------------------------===//
1670 
1671 std::unique_ptr<OperationPass<ModuleOp>> createLoopScheduleToCalyxPass() {
1672  return std::make_unique<pipelinetocalyx::LoopScheduleToCalyxPass>();
1673 }
1674 
1675 } // namespace circt
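A minimal, hedged sketch of driving this conversion programmatically with mlir::PassManager; the surrounding setup and the header declaring createLoopScheduleToCalyxPass are assumed and not shown in this file:

// Hedged usage sketch: run the LoopSchedule-to-Calyx conversion on a module.
#include "mlir/Pass/PassManager.h"

static mlir::LogicalResult lowerToCalyx(mlir::ModuleOp module) {
  mlir::PassManager pm(module.getContext());
  // createLoopScheduleToCalyxPass() is defined above in namespace circt.
  pm.addPass(circt::createLoopScheduleToCalyxPass());
  return pm.run(module);
}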