CIRCT 20.0.0git
Loading...
Searching...
No Matches
LowerSeqHLMem.cpp
Go to the documentation of this file.
1//===- LowerSeqHLMem.cpp - seq.hlmem lowering -----------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass pattern matches lowering patterns on seq.hlmem ops and referencing
10// ports.
11//
12//===----------------------------------------------------------------------===//
13
17#include "mlir/Pass/Pass.h"
18#include "mlir/Transforms/DialectConversion.h"
19#include "llvm/ADT/TypeSwitch.h"
20
21namespace circt {
22namespace seq {
23#define GEN_PASS_DEF_LOWERSEQHLMEM
24#include "circt/Dialect/Seq/SeqPasses.h.inc"
25} // namespace seq
26} // namespace circt
27
28using namespace circt;
29using namespace seq;
30
31namespace {
32
/// Fallback lowering of a `seq.hlmem` (plus its read/write ports) into a
/// behavioral SV register array (`sv.reg` of an unpacked array) with
/// always_ff write logic and combinational/registered reads.
struct SimpleBehavioralMemoryLowering
    : public OpConversionPattern<seq::HLMemOp> {
  // A simple behavioral SV implementation of a HLMemOp. This is intended as a
  // fall-back pattern if any other higher benefit/target-specific patterns
  // failed to match.
public:
  using OpConversionPattern::OpConversionPattern;

  /// Rewrite one seq.hlmem op together with all of its referencing port ops.
  /// Fails (leaving the IR untouched — the conversion driver rolls back
  /// in-flight changes on match failure) when the memory is not
  /// unidimensional, when any user is not a read/write port, or when a write
  /// port's latency is not exactly 1.
  LogicalResult
  matchAndRewrite(seq::HLMemOp mem, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {

    // Only support unidimensional memories.
    auto memType = mem.getMemType();
    if (memType.getShape().size() != 1)
      return rewriter.notifyMatchFailure(
          mem, "only unidimensional memories are supported");
    auto size = memType.getShape()[0];

    // Gather up the referencing ops. Any user of the memory handle that is
    // neither a read port nor a write port aborts the match.
    llvm::SmallVector<seq::ReadPortOp> readOps;
    llvm::SmallVector<seq::WritePortOp> writeOps;
    for (auto *user : mem.getHandle().getUsers()) {
      auto res = llvm::TypeSwitch<Operation *, LogicalResult>(user)
                     .Case([&](seq::ReadPortOp op) {
                       readOps.push_back(op);
                       return success();
                     })
                     .Case([&](seq::WritePortOp op) {
                       writeOps.push_back(op);
                       return success();
                     })
                     .Default([&](Operation *op) { return failure(); });
      if (failed(res))
        return rewriter.notifyMatchFailure(user, "unsupported port type");
    }

    auto clk = mem.getClk();
    auto rst = mem.getRst();
    auto memName = mem.getName();

    // Create the SV memory: a register holding an unpacked array of the
    // memory's element type, reusing the hlmem's name.
    hw::UnpackedArrayType memArrType =
        hw::UnpackedArrayType::get(memType.getElementType(), size);
    auto svMem =
        rewriter.create<sv::RegOp>(mem.getLoc(), memArrType, mem.getNameAttr())
            .getResult();

    // Create write ports by gathering up the write port inputs and
    // materializing the writes inside a single always ff block.
    struct WriteTuple {
      Location loc; // source location of the originating write port
      Value addr;   // write address (first and only address operand)
      Value data;   // data to store
      Value en;     // write enable
    };
    llvm::SmallVector<WriteTuple> writeTuples;
    for (auto writeOp : writeOps) {
      if (writeOp.getLatency() != 1)
        return rewriter.notifyMatchFailure(
            writeOp, "only supports write ports with latency == 1");
      auto addr = writeOp.getAddresses()[0];
      auto data = writeOp.getInData();
      auto en = writeOp.getWrEn();
      writeTuples.push_back({writeOp.getLoc(), addr, data, en});
      rewriter.eraseOp(writeOp);
    }

    // Convert the seq clock to a plain signal for the always_ff trigger, and
    // emit all writes in one posedge-clocked block (sync reset on posedge
    // rst; no reset body is emitted here).
    auto hwClk = rewriter.create<seq::FromClockOp>(clk.getLoc(), clk);
    rewriter.create<sv::AlwaysFFOp>(
        mem.getLoc(), sv::EventControl::AtPosEdge, hwClk,
        sv::ResetType::SyncReset, sv::EventControl::AtPosEdge, rst, [&] {
          for (auto [loc, address, data, en] : writeTuples) {
            // Structured bindings can't be captured by C++17 lambdas; copy
            // into plain locals first.
            Value a = address, d = data; // So the lambda can capture.
            Location l = loc;
            // Perform write upon write enable being high.
            rewriter.create<sv::IfOp>(loc, en, [&] {
              Value memLoc =
                  rewriter.create<sv::ArrayIndexInOutOp>(l, svMem, a);
              // Non-blocking (<=) assignment into the selected array slot.
              rewriter.create<sv::PAssignOp>(l, memLoc, d);
            });
          }
        });

    // Create read ports. When latency > 1, we perform the read 1 cycle before
    // the latency deadline, and register the read output. By doing so, we avoid
    // start a critical path right after the combinational read.
    // TODO: When latency > 2, and assuming that the exact read time is flexible
    // within the latency window, we could decide on whether to buffer the read
    // data or the read address more, based on which is narrower (saving area).
    for (auto [ri, readOp] : llvm::enumerate(readOps)) {
      rewriter.setInsertionPointAfter(readOp);
      auto loc = readOp.getLoc();

      auto readAddress = readOp.getAddresses()[0];
      unsigned latency = readOp.getLatency();
      // latency - 1 address-delay registers; the final cycle is spent in the
      // read-data register below. Guarded by latency > 0 so the unsigned
      // subtraction can't be used when latency == 0.
      unsigned addressDelayCycles = latency - 1;
      if (latency > 0) {
        // Materialize any delays on the read address.
        for (unsigned i = 0; i < addressDelayCycles; ++i) {
          readAddress = rewriter.create<seq::CompRegOp>(
              loc, readAddress, clk,
              rewriter.getStringAttr(memName + "_rdaddr" + std::to_string(ri) +
                                     "_dly" + std::to_string(i)));
        }
      }

      // Create a combinational read.
      Value memLoc =
          rewriter.create<sv::ArrayIndexInOutOp>(loc, svMem, readAddress);
      Value readData = rewriter.create<sv::ReadInOutOp>(loc, memLoc);
      if (latency > 0) {
        // Register the read data to close the latency contract.
        readData = rewriter.create<seq::CompRegOp>(
            loc, readData, clk,
            rewriter.getStringAttr(memName + "_rd" + std::to_string(ri) +
                                   "_reg"));
      }
      rewriter.replaceOp(readOp, {readData});
    }

    // All ports have been rewritten; remove the memory itself.
    rewriter.eraseOp(mem);
    return success();
  }
};
/// Pass driver: runs the hlmem lowering patterns over a single hw.module
/// (operation type supplied by the tablegen-generated LowerSeqHLMemBase).
struct LowerSeqHLMemPass
    : public circt::seq::impl::LowerSeqHLMemBase<LowerSeqHLMemPass> {
  void runOnOperation() override;
};
162
163} // namespace
164
165void LowerSeqHLMemPass::runOnOperation() {
166 hw::HWModuleOp top = getOperation();
167
168 MLIRContext &ctxt = getContext();
169 ConversionTarget target(ctxt);
170
171 // Lowering patterns must lower away all HLMem-related operations.
172 target.addIllegalOp<seq::HLMemOp, seq::ReadPortOp, seq::WritePortOp>();
173 target.addLegalDialect<sv::SVDialect, seq::SeqDialect>();
174 RewritePatternSet patterns(&ctxt);
175 patterns.add<SimpleBehavioralMemoryLowering>(&ctxt);
176
177 if (failed(applyPartialConversion(top, target, std::move(patterns))))
178 signalPassFailure();
179}
180
181std::unique_ptr<Pass> circt::seq::createLowerSeqHLMemPass() {
182 return std::make_unique<LowerSeqHLMemPass>();
183}
create(value)
Definition sv.py:106
Definition sv.py:68
std::unique_ptr< mlir::Pass > createLowerSeqHLMemPass()
The InstanceGraph op interface, see InstanceGraphInterface.td for more details.
Definition seq.py:1