18 #include "mlir/Pass/Pass.h"
19 #include "llvm/Support/Debug.h"
22 #define DEBUG_TYPE "lower-memory"
26 #define GEN_PASS_DEF_FLATTENMEMORY
27 #include "circt/Dialect/FIRRTL/Passes.h.inc"
31 using namespace circt;
32 using namespace firrtl;
// FlattenMemoryPass: lowers memories with aggregate (bundle/vector) data types
// into a single memory with one flat integer data type, bit-casting the
// per-field accesses in and out of the flat representation.
//
// NOTE(review): this listing is an extraction artifact — the embedded original
// line numbers skip (e.g. 46 -> 51, 81 -> 87), so many statements and closing
// braces are missing below. Comments describe only what the visible code shows;
// anything inferred is hedged.
35 struct FlattenMemoryPass
36 :
public circt::firrtl::impl::FlattenMemoryBase<FlattenMemoryPass> {
// Entry point: walks every MemOp in the module body and flattens it in place.
39 void runOnOperation()
override {
40 LLVM_DEBUG(llvm::dbgs() <<
"\n Running lower memory on module:"
42 SmallVector<Operation *> opsToErase;
// Predicate: true if any port annotation on the memory carries a
// "circt.fieldID" entry, i.e. the annotation targets a sub-element of an
// aggregate port. Such memories are presumably skipped — TODO confirm, the
// lambda's return statements are missing from this listing.
43 auto hasSubAnno = [&](MemOp op) ->
bool {
44 for (
size_t portIdx = 0, e = op.getNumResults(); portIdx < e; ++portIdx)
45 for (
auto attr : op.getPortAnnotation(portIdx))
46 if (cast<DictionaryAttr>(attr).get(
"circt.fieldID"))
// Process each memory in the module body.
51 getOperation().getBodyBlock()->walk([&](MemOp memOp) {
52 LLVM_DEBUG(llvm::dbgs() <<
"\n Memory:" << memOp);
// Leaf integer field types of the memory's data type, in field order.
54 SmallVector<IntType> flatMemType;
// Total width (in mask bits) of the flattened mask.
58 uint32_t totalmaskWidths = 0;
// Per-field mask width: field width divided by the mask granularity.
60 SmallVector<unsigned> maskWidths;
// Bail out on memories with ref-type results — the action taken is not
// visible in this listing; presumably the memory is skipped.
65 for (
auto res : memOp.getResults())
66 if (isa<RefType>(res.getType()))
// Skip memories with sub-field port annotations or data types that cannot
// be flattened into known-width integer leaves.
71 if (hasSubAnno(memOp) || !flattenType(memOp.getDataType(), flatMemType))
74 SmallVector<Operation *, 8> flatData;
75 SmallVector<int32_t> memWidths;
76 size_t memFlatWidth = 0;
// Collect the bit width of every flattened leaf field.
78 for (
auto f : flatMemType) {
79 LLVM_DEBUG(llvm::dbgs() <<
"\n field type:" << f);
80 auto w = *f.getWidth();
81 memWidths.push_back(w);
// Mask granularity: the GCD of all field widths, so that one mask bit can
// cover `maskGran` data bits uniformly across all fields.
87 maskGran = memWidths[0];
89 for (
auto w : memWidths) {
90 maskGran = std::gcd(maskGran, w);
// Each field of width w contributes w / maskGran mask bits.
92 for (
auto w : memWidths) {
94 auto mWidth = w / maskGran;
95 maskWidths.push_back(mWidth);
96 totalmaskWidths += mWidth;
// Build the port types for the replacement (flattened) memory.
100 SmallVector<Type, 8> ports;
101 SmallVector<Attribute, 8> portNames;
103 auto *context = memOp.getContext();
104 ImplicitLocOpBuilder builder(memOp.getLoc(), memOp);
107 auto opPorts = memOp.getPorts();
108 for (
size_t portIdx = 0, e = opPorts.size(); portIdx < e; ++portIdx) {
109 auto port = opPorts[portIdx];
// `flatType` is the flattened data type; its computation (original lines
// 105-106) is missing from this listing.
110 ports.push_back(MemOp::getTypeForPort(memOp.getDepth(), flatType,
111 port.second, totalmaskWidths));
112 portNames.push_back(port.first);
// Create the replacement memory: identical depth, latencies, read-under-
// write behavior, names, and annotations — only the port types differ.
115 auto flatMem = builder.create<MemOp>(
116 ports, memOp.getReadLatency(), memOp.getWriteLatency(),
117 memOp.getDepth(), memOp.getRuw(), builder.getArrayAttr(portNames),
118 memOp.getNameAttr(), memOp.getNameKind(), memOp.getAnnotations(),
119 memOp.getPortAnnotations(), memOp.getInnerSymAttr(),
120 memOp.getInitAttr(), memOp.getPrefixAttr());
// Hook up every old memory port through a wire of the old aggregate type,
// then connect each wire field to the corresponding flat-memory field.
122 for (
size_t index = 0, rend = memOp.getNumResults(); index < rend;
124 auto result = memOp.getResult(index);
// Wire named "<memName>_<portName>" stands in for the old port.
126 .create<WireOp>(result.getType(),
127 (memOp.getName() +
"_" +
128 memOp.getPortName(index).getValue())
// All users of the old aggregate port now read/write through the wire.
131 result.replaceAllUsesWith(wire);
133 auto newResult = flatMem.getResult(index);
134 auto rType = type_cast<BundleType>(result.getType());
// Connect the wire's bundle fields to the new memory's bundle fields.
135 for (
size_t fieldIndex = 0, fend = rType.getNumElements();
136 fieldIndex != fend; ++fieldIndex) {
137 auto name = rType.getElement(fieldIndex).name.getValue();
138 auto oldField = builder.create<SubfieldOp>(result, fieldIndex);
140 builder.create<SubfieldOp>(newResult, fieldIndex);
// Non-payload fields (addr, en, clk, ...) connect straight through; only
// the data/mask-carrying fields below need width/shape adaptation.
143 if (!(name ==
"data" || name ==
"mask" || name ==
"wdata" ||
144 name ==
"wmask" || name ==
"rdata")) {
148 Value realOldField = oldField;
// Flipped (memory -> user) fields, e.g. rdata: cast the flat value back
// to the old aggregate field type.
149 if (rType.getElement(fieldIndex).isFlip) {
152 builder.createOrFold<BitCastOp>(oldField.getType(), newField);
// Non-flipped (user -> memory) fields: cast the old value to the flat
// field type when the widths differ.
158 auto newFieldType = newField.getType();
159 auto oldFieldBitWidth =
getBitWidth(oldField.getType());
162 if (
getBitWidth(newFieldType) != *oldFieldBitWidth)
164 realOldField = builder.create<BitCastOp>(newFieldType, oldField);
// Mask fields: the old mask has one bit per aggregate field, the new mask
// has one bit per maskGran data bits — replicate each old mask bit
// m.value() times and concatenate.
168 if ((name ==
"mask" || name ==
"wmask") &&
169 (maskWidths.size() != totalmaskWidths)) {
171 for (
const auto &m : llvm::enumerate(maskWidths)) {
// Extract mask bit m.index() from the old mask value.
173 auto mBit = builder.createOrFold<BitsPrimOp>(
174 realOldField, m.index(), m.index());
176 for (
size_t repeat = 0; repeat < m.value(); repeat++)
// First bit seeds the accumulator (the seeding statement, original
// lines 178-179, is missing from this listing); later bits are
// concatenated onto it.
177 if ((m.index() == 0 && repeat == 0) || !catMasks)
180 catMasks = builder.createOrFold<CatPrimOp>(mBit, catMasks);
182 realOldField = catMasks;
// Finally bit-cast the adapted value onto the new memory field; the
// remainder of this expression (original line 188+) is missing.
187 builder.createOrFold<BitCastOp>(newField.getType(),
// Flattens `type` into its leaf integer field types, appended in order to
// `results`. Returns true on success — the visible tail requires that the
// recursive flatten succeeded and produced more than one leaf (a single-leaf
// type needs no flattening).
//
// NOTE(review): this listing is truncated — the declaration of the recursive
// `flatten` lambda and the TypeSwitch head (original lines 203-204) plus the
// per-case return statements are missing; comments describe only what is
// visible.
202 static bool flattenType(
FIRRTLType type, SmallVectorImpl<IntType> &results) {
// Bundles: recurse into each element, in declaration order.
205 .
Case<BundleType>([&](
auto bundle) {
206 for (
auto &elt : bundle)
207 if (!flatten(elt.type))
// Vectors: recurse once per element (same element type repeated).
211 .Case<FVectorType>([&](
auto vector) {
212 for (
size_t i = 0, e = vector.getNumElements(); i != e; ++i)
213 if (!flatten(vector.getElementType()))
// Integers are the leaves; only known-width integers are flattenable.
217 .Case<IntType>([&](
auto iType) {
218 results.push_back({iType});
219 return iType.getWidth().has_value();
// Any other type cannot be flattened.
221 .Default([&](
auto) {
return false; });
// Succeed only when flattening worked and yielded multiple leaf fields.
224 if (flatten(type) && results.size() > 1)
// Returns the `index`-th sub-element of the aggregate value `val`: a
// SubfieldOp for bundles, a SubindexOp for vectors. Any other type is a
// programmer error (unreachable).
// NOTE(review): the closing brace (original line 236) is missing from this
// truncated listing.
229 Value getSubWhatever(ImplicitLocOpBuilder *builder, Value val,
size_t index) {
230 if (BundleType bundle = type_dyn_cast<BundleType>(val.getType()))
231 return builder->create<SubfieldOp>(val, index);
232 if (FVectorType fvector = type_dyn_cast<FVectorType>(val.getType()))
233 return builder->create<SubindexOp>(val, index);
235 llvm_unreachable(
"Unknown aggregate type");
242 return std::make_unique<FlattenMemoryPass>();
This class implements the same functionality as TypeSwitch except that it uses firrtl::type_dyn_cast instead of dyn_cast.
FIRRTLTypeSwitch< T, ResultT > & Case(CallableT &&caseFn)
Add a case on the given type.
Direction get(bool isOutput)
Returns an output direction if isOutput is true, otherwise returns an input direction.
mlir::TypedValue< FIRRTLBaseType > FIRRTLBaseValue
void emitConnect(OpBuilder &builder, Location loc, Value lhs, Value rhs)
Emit a connect between two values.
std::optional< int64_t > getBitWidth(FIRRTLBaseType type, bool ignoreFlip=false)
std::unique_ptr< mlir::Pass > createFlattenMemoryPass()
StringAttr getName(ArrayAttr names, size_t idx)
Returns the name at the specified index of the ArrayAttr, or null if it cannot be determined.
The InstanceGraph op interface, see InstanceGraphInterface.td for more details.