#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/Pass/Pass.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Path.h"

#define GEN_PASS_DEF_HWMEMSIMIMPL
#include "circt/Dialect/Seq/SeqPasses.h.inc"
  bool disableMemRandomization;
  bool disableRegRandomization;
  bool addVivadoRAMAddressConflictSynthesisBugWorkaround;

  SmallVector<sv::RegOp> registers;

  Value addPipelineStages(ImplicitLocOpBuilder &b,
                          hw::InnerSymbolNamespace &moduleNamespace,
                          size_t stages, Value clock, Value data,
                          const Twine &name, Value gate = {});

  sv::AlwaysOp lastPipelineAlwaysOp;
  HWMemSimImpl(ReadEnableMode readEnableMode, bool addMuxPragmas,
               bool disableMemRandomization, bool disableRegRandomization,
               bool addVivadoRAMAddressConflictSynthesisBugWorkaround,
               Namespace &mlirModuleNamespace)
      : readEnableMode(readEnableMode), addMuxPragmas(addMuxPragmas),
        disableMemRandomization(disableMemRandomization),
        disableRegRandomization(disableRegRandomization),
        addVivadoRAMAddressConflictSynthesisBugWorkaround(
            addVivadoRAMAddressConflictSynthesisBugWorkaround),
        mlirModuleNamespace(mlirModuleNamespace) {}

  void generateMemory(HWModuleOp op, FirMemory mem);
struct HWMemSimImplPass : public impl::HWMemSimImplBase<HWMemSimImplPass> {
  using HWMemSimImplBase::HWMemSimImplBase;

  void runOnOperation() override;
};
/// A helper that returns true if a value definition (or block argument) is
/// visible to another operation.
static bool valueDefinedBeforeOp(Value value, Operation *op) {
  Operation *valueOp = value.getDefiningOp();
  Block *valueBlock =
      valueOp ? valueOp->getBlock() : cast<BlockArgument>(value).getOwner();
  while (op->getBlock() && op->getBlock() != valueBlock)
    op = op->getParentOp();
  return valueBlock == op->getBlock() &&
         (!valueOp || valueOp->isBeforeInBlock(op));
}
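/// Produce a read of the given memory at the given address. When requested,
/// attach the "cadence map_to_mux" and "synopsys infer_mux_override" pragmas
/// so that reads of larger memories are inferred as muxes.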
static Value getMemoryRead(ImplicitLocOpBuilder &b, Value memory, Value addr,
                           bool addMuxPragmas) {
  auto slot = sv::ReadInOutOp::create(
      b, sv::ArrayIndexInOutOp::create(b, memory, addr));

  // If mux pragmas are not requested, or the memory is trivially small, just
  // return the read value.
  if (!addMuxPragmas ||
      cast<hw::UnpackedArrayType>(
          cast<hw::InOutType>(memory.getType()).getElementType())
              .getNumElements() <= 1)
    return slot;

  sv::setSVAttributes(
      slot, sv::SVAttributeAttr::get(b.getContext(), "cadence map_to_mux",
                                     /*emitAsComment=*/true));

  sv::setSVAttributes(assignOp,
                      sv::SVAttributeAttr::get(b.getContext(),
                                               "synopsys infer_mux_override",
                                               /*emitAsComment=*/true));
/// Add a pipeline of `stages` registers in front of `data`, clocked on the
/// positive edge of `clock` and optionally gated by `gate`.
Value HWMemSimImpl::addPipelineStages(ImplicitLocOpBuilder &b,
                                      hw::InnerSymbolNamespace &moduleNamespace,
                                      size_t stages, Value clock, Value data,
                                      const Twine &name, Value gate) {
  if (!stages)
    return data;

  // Try to reuse the previous always block; otherwise create a fresh one on
  // the positive clock edge.
  auto alwaysOp = lastPipelineAlwaysOp;
  if (alwaysOp) {
    if (alwaysOp.getClocks() != ValueRange{clock} ||
        !valueDefinedBeforeOp(data, alwaysOp) ||
        (gate && !valueDefinedBeforeOp(gate, alwaysOp)))
      alwaysOp = {};
  }
  if (!alwaysOp)
    alwaysOp = sv::AlwaysOp::create(b, sv::EventControl::AtPosEdge, clock);

  // Declare one register per pipeline stage, right before the always block.
  auto savedIP = b.saveInsertionPoint();
  SmallVector<sv::RegOp> regs;
  b.setInsertionPoint(alwaysOp);
  for (unsigned i = 0; i < stages; ++i) {
    auto regName =
        b.getStringAttr(moduleNamespace.newName("_" + name + "_d" + Twine(i)));
    auto reg = sv::RegOp::create(b, data.getType(), regName,
                                 hw::InnerSymAttr::get(regName));
    regs.push_back(reg);
    registers.push_back(reg);
  }

  // Populate the stage assignments inside the always block, guarded by the
  // optional gate.
  b.setInsertionPointToEnd(alwaysOp.getBodyBlock());
  for (unsigned i = 0; i < stages; ++i) {
    auto emitAssign = [&] { sv::PAssignOp::create(b, regs[i], data); };
    if (gate)
      sv::IfOp::create(b, gate, [&]() { emitAssign(); });
    else
      emitAssign();
  }
  b.restoreInsertionPoint(savedIP);

  lastPipelineAlwaysOp = alwaysOp;
  return data;
}
void HWMemSimImpl::generateMemory(HWModuleOp op, FirMemory mem) {
  ImplicitLocOpBuilder b(op.getLoc(), op.getBody());

  // A mask granularity of zero means the memory is unmasked.
  if (mem.maskGran == 0)
    mem.maskGran = mem.dataWidth;
  auto maskBits = mem.dataWidth / mem.maskGran;
  bool isMasked = maskBits > 1;

  auto dataType = b.getIntegerType(mem.dataWidth);

  unsigned numPorts =
      mem.numReadPorts + mem.numWritePorts + mem.numReadWritePorts;

  // The backing storage: an unpacked array register named "Memory".
  sv::RegOp reg =
      sv::RegOp::create(b, UnpackedArrayType::get(dataType, mem.depth),
                        b.getStringAttr("Memory"));
  // Workaround for a Vivado synthesis bug: combinational-read memories are
  // forced to distributed RAM, and single-cycle-read multi-port memories are
  // marked for read/write address collision handling.
  if (addVivadoRAMAddressConflictSynthesisBugWorkaround) {
    if (mem.readLatency == 0) {
      sv::setSVAttributes(
          reg, sv::SVAttributeAttr::get(b.getContext(), "ram_style",
                                        R"("distributed")"));
    } else if (mem.readLatency == 1 && numPorts > 1) {
      sv::setSVAttributes(
          reg, sv::SVAttributeAttr::get(b.getContext(), "rw_addr_collision",
                                        R"("yes")"));
    }
  }

  SmallVector<Value, 4> outputs;
  size_t inArg = 0;
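  // Add the read ports. Address and enable are pipelined through
  // `mem.readLatency` stages (gated stage-by-stage when read enables are
  // ignored); the read data is then muxed with 'x or zero for disabled ports,
  // depending on the read-enable mode.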
  for (size_t i = 0; i < mem.numReadPorts; ++i) {
    Value addr = op.getBody().getArgument(inArg++);
    Value en = op.getBody().getArgument(inArg++);
    Value clock = op.getBody().getArgument(inArg++);

    if (readEnableMode == ReadEnableMode::Ignore) {
      for (size_t j = 0, e = mem.readLatency; j != e; ++j) {
        auto enLast = en;
        if (j < e - 1)
          en = addPipelineStages(b, moduleNamespace, 1, clock, en,
                                 "R" + Twine(i) + "_en");
        addr = addPipelineStages(b, moduleNamespace, 1, clock, addr,
                                 "R" + Twine(i) + "_addr", enLast);
      }
    } else {
      en = addPipelineStages(b, moduleNamespace, mem.readLatency, clock, en,
                             "R" + Twine(i) + "_en");
      addr = addPipelineStages(b, moduleNamespace, mem.readLatency, clock, addr,
                               "R" + Twine(i) + "_addr");
    }

    Value rdata = getMemoryRead(b, reg, addr, addMuxPragmas);
    switch (readEnableMode) {
    case ReadEnableMode::Undefined: {
      Value x = sv::ConstantXOp::create(b, rdata.getType());
      rdata = comb::MuxOp::create(b, en, rdata, x, false);
      break;
    }
    case ReadEnableMode::Zero: {
      Value x = hw::ConstantOp::create(b, rdata.getType(), 0);
      rdata = comb::MuxOp::create(b, en, rdata, x, false);
      break;
    }
    case ReadEnableMode::Ignore:
      break;
    }
    outputs.push_back(rdata);
  }
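  // Add the read-write ports. The address, enable, and write-mode inputs share
  // `min(readLatency, writeLatency - 1)` pipeline stages, after which the read
  // and write paths are pipelined separately.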
  for (size_t i = 0; i < mem.numReadWritePorts; ++i) {
    auto numReadStages = mem.readLatency;
    auto numWriteStages = mem.writeLatency - 1;
    auto numCommonStages = std::min(numReadStages, numWriteStages);
    Value addr = op.getBody().getArgument(inArg++);
    Value en = op.getBody().getArgument(inArg++);
    Value clock = op.getBody().getArgument(inArg++);
    Value wmode = op.getBody().getArgument(inArg++);
    Value wdataIn = op.getBody().getArgument(inArg++);
    Value wmaskBits;
    if (isMasked)
      wmaskBits = op.getBody().getArgument(inArg++);

    addr = addPipelineStages(b, moduleNamespace, numCommonStages, clock, addr,
                             "RW" + Twine(i) + "_addr");
    en = addPipelineStages(b, moduleNamespace, numCommonStages, clock, en,
                           "RW" + Twine(i) + "_en");
    wmode = addPipelineStages(b, moduleNamespace, numCommonStages, clock, wmode,
                              "RW" + Twine(i) + "_mode");

    Value readAddr = addr;
    Value readEn = en;
    if (readEnableMode == ReadEnableMode::Ignore) {
      for (size_t j = 0, e = mem.readLatency; j != e; ++j) {
        auto enLast = en;
        if (j < e - 1)
          readEn = addPipelineStages(b, moduleNamespace, 1, clock, en,
                                     "RW" + Twine(i) + "_ren");
        readAddr = addPipelineStages(b, moduleNamespace, 1, clock, addr,
                                     "RW" + Twine(i) + "_raddr", enLast);
      }
    } else {
      readAddr =
          addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
                            clock, addr, "RW" + Twine(i) + "_raddr");
      readEn =
          addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
                            clock, en, "RW" + Twine(i) + "_ren");
    }
    auto readWMode =
        addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
                          clock, wmode, "RW" + Twine(i) + "_rmode");
    auto writeAddr =
        addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
                          clock, addr, "RW" + Twine(i) + "_waddr");
    auto writeEn =
        addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
                          clock, en, "RW" + Twine(i) + "_wen");
    auto writeWMode =
        addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
                          clock, wmode, "RW" + Twine(i) + "_wmode");
    wdataIn = addPipelineStages(b, moduleNamespace, numWriteStages, clock,
                                wdataIn, "RW" + Twine(i) + "_wdata");
    if (isMasked)
      wmaskBits = addPipelineStages(b, moduleNamespace, numWriteStages, clock,
                                    wmaskBits, "RW" + Twine(i) + "_wmask");
    // Split the write data and mask into one slice per mask granule.
    SmallVector<Value, 4> maskValues(maskBits);
    SmallVector<Value, 4> dataValues(maskBits);
    for (size_t i = 0; i < maskBits; ++i) {
      maskValues[i] = b.createOrFold<comb::ExtractOp>(wmaskBits, i, 1);
      dataValues[i] = b.createOrFold<comb::ExtractOp>(wdataIn, i * mem.maskGran,
                                                      mem.maskGran);
    }

    // The port reads when it is enabled and not in write mode.
    Value rcond = b.createOrFold<comb::AndOp>(
        readEn,
        b.createOrFold<comb::ICmpOp>(
            comb::ICmpPredicate::eq, readWMode,
            b.createOrFold<ConstantOp>(readWMode.getType(), 0), false),
        false);

    auto val = getMemoryRead(b, reg, readAddr, addMuxPragmas);

    switch (readEnableMode) {
    case ReadEnableMode::Undefined: {
      Value x = sv::ConstantXOp::create(b, val.getType());
      val = comb::MuxOp::create(b, rcond, val, x, false);
      break;
    }
    case ReadEnableMode::Zero: {
      Value x = hw::ConstantOp::create(b, val.getType(), 0);
      val = comb::MuxOp::create(b, rcond, val, x, false);
      break;
    }
    case ReadEnableMode::Ignore:
      break;
    }
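    // Write side of the read-write port: for each mask granule, an always
    // block part-select-assigns the granule's slice of the addressed slot
    // whenever the write condition for that granule holds.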
    for (auto wmask : llvm::enumerate(maskValues)) {
      sv::AlwaysOp::create(b, sv::EventControl::AtPosEdge, clock, [&]() {
        sv::IfOp::create(b, wcond, [&]() {
          Value slotReg = sv::ArrayIndexInOutOp::create(b, reg, writeAddr);
          sv::PAssignOp::create(
              b,
              b.createOrFold<sv::IndexedPartSelectInOutOp>(
                  slotReg,
                  b.createOrFold<ConstantOp>(b.getIntegerType(32),
                                             wmask.index() * mem.maskGran),
                  mem.maskGran),
              dataValues[wmask.index()]);
        });
      });
    }

    outputs.push_back(rdata);
  }
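  // Add the write ports. Inputs are pipelined through `writeLatency - 1`
  // stages and the data/mask are sliced per mask granule. Depending on the
  // write-under-write behavior, writes sharing a clock may be folded into a
  // single always block to preserve port order.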
  DenseMap<unsigned, Operation *> writeProcesses;
  for (size_t i = 0; i < mem.numWritePorts; ++i) {
    auto numStages = mem.writeLatency - 1;
    Value addr = op.getBody().getArgument(inArg++);
    Value en = op.getBody().getArgument(inArg++);
    Value clock = op.getBody().getArgument(inArg++);
    Value wdataIn = op.getBody().getArgument(inArg++);
    Value wmaskBits;
    if (isMasked)
      wmaskBits = op.getBody().getArgument(inArg++);

    addr = addPipelineStages(b, moduleNamespace, numStages, clock, addr,
                             "W" + Twine(i) + "addr");
    en = addPipelineStages(b, moduleNamespace, numStages, clock, en,
                           "W" + Twine(i) + "en");
    wdataIn = addPipelineStages(b, moduleNamespace, numStages, clock, wdataIn,
                                "W" + Twine(i) + "data");
    if (isMasked)
      wmaskBits = addPipelineStages(b, moduleNamespace, numStages, clock,
                                    wmaskBits, "W" + Twine(i) + "mask");

    SmallVector<Value, 4> maskValues(maskBits);
    SmallVector<Value, 4> dataValues(maskBits);
    for (size_t i = 0; i < maskBits; ++i) {
      maskValues[i] = b.createOrFold<comb::ExtractOp>(wmaskBits, i, 1);
      dataValues[i] = b.createOrFold<comb::ExtractOp>(wdataIn, i * mem.maskGran,
                                                      mem.maskGran);
    }
    auto writeLogic = [&] {
      for (auto wmask : llvm::enumerate(maskValues)) {
        sv::IfOp::create(b, wcond, [&]() {
          auto slot = sv::ArrayIndexInOutOp::create(b, reg, addr);
          sv::PAssignOp::create(
              b,
              b.createOrFold<sv::IndexedPartSelectInOutOp>(
                  slot,
                  b.createOrFold<ConstantOp>(b.getIntegerType(32),
                                             wmask.index() * mem.maskGran),
                  mem.maskGran),
              dataValues[wmask.index()]);
        });
      }
    };

    auto alwaysBlock = [&] {
      return sv::AlwaysOp::create(b, sv::EventControl::AtPosEdge, clock,
                                  [&]() { writeLogic(); });
    };

    switch (mem.writeUnderWrite) {
    case seq::WUW::Undefined:
      // Undefined write order: lower each write port into its own always
      // block.
      alwaysBlock();
      break;
    case seq::WUW::PortOrder:
      // Port-ordered writes: reuse the always block of an earlier write port
      // in the same clock domain so the writes appear in port order.
      if (auto *existingAlwaysBlock =
              writeProcesses.lookup(mem.writeClockIDs[i])) {
        OpBuilder::InsertionGuard guard(b);
        b.setInsertionPointToEnd(
            cast<sv::AlwaysOp>(existingAlwaysBlock).getBodyBlock());
        writeLogic();
      } else {
        writeProcesses[i] = alwaysBlock();
      }
    }
  }
  auto *outputOp = op.getBodyBlock()->getTerminator();
  outputOp->setOperands(outputs);
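  // Memory initialization: emit a $readmemh/$readmemb either inline under
  // `ifdef ENABLE_INITIAL_MEM_, or in a separate bound module that reaches the
  // memory through a hierarchical path.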
  if (!mem.initFilename.empty()) {
    // Set an inner symbol on the memory register so it can be referenced.
    if (!reg.getInnerSymAttr())
      reg.setInnerSymAttr(hw::InnerSymAttr::get(
          b.getStringAttr(moduleNamespace.newName(reg.getName()))));

    if (mem.initIsInline) {
      sv::IfDefOp::create(b, "ENABLE_INITIAL_MEM_", [&]() {
        sv::InitialOp::create(b, [&]() {
          sv::ReadMemOp::create(b, reg, mem.initFilename,
                                mem.initIsBinary ? MemBaseTypeAttr::MemBaseBin
                                                 : MemBaseTypeAttr::MemBaseHex);
        });
      });
    } else {
      OpBuilder::InsertionGuard guard(b);

      // Create a bound module that holds the readmem statement.
      StringAttr boundModuleName =
          b.getStringAttr(mlirModuleNamespace.newName(op.getName() + "_init"));

      // Determine the output file for the bound module.
      StringAttr filename;
      if (auto fileAttr = op->getAttrOfType<OutputFileAttr>("output_file")) {
        if (!fileAttr.isDirectory()) {
          SmallString<128> path(fileAttr.getFilename().getValue());
          llvm::sys::path::remove_filename(path);
          llvm::sys::path::append(path, boundModuleName.getValue() + ".sv");
          filename = b.getStringAttr(path);
        } else {
          filename = fileAttr.getFilename();
        }
      } else {
        filename = b.getStringAttr(boundModuleName.getValue() + ".sv");
      }

      b.setInsertionPointAfter(op);
      auto boundModule =
          HWModuleOp::create(b, boundModuleName, ArrayRef<PortInfo>());

      // A hierarchical path to the memory register inside the parent module.
      auto path = hw::HierPathOp::create(
          b, mlirModuleNamespace.newName(op.getName() + "_path"),
          b.getArrayAttr(hw::InnerRefAttr::get(op.getNameAttr(),
                                               reg.getInnerNameAttr())));

      b.setInsertionPointToStart(boundModule.getBodyBlock());
      sv::InitialOp::create(b, [&]() {
        auto xmr =
            sv::XMRRefOp::create(b, reg.getType(), path.getSymNameAttr());
        sv::ReadMemOp::create(b, xmr, mem.initFilename,
                              mem.initIsBinary ? MemBaseTypeAttr::MemBaseBin
                                               : MemBaseTypeAttr::MemBaseHex);
      });

      // Instantiate the bound module next to the memory and mark it bind-only.
      b.setInsertionPointAfter(reg);
      auto boundInstance = hw::InstanceOp::create(
          b, boundModule, boundModule.getName(), ArrayRef<Value>());
      boundInstance->setAttr(
          "inner_sym",
          hw::InnerSymAttr::get(b.getStringAttr(
              moduleNamespace.newName(boundInstance.getInstanceName()))));
      boundInstance.setDoNotPrintAttr(b.getUnitAttr());

      // Emit the bound module and its bind statement into the output file.
      b.setInsertionPointAfter(op);
      emit::FileOp::create(b, filename, [&] {
        emit::RefOp::create(b, FlatSymbolRefAttr::get(boundModuleName));
        sv::BindOp::create(b,
                           hw::InnerRefAttr::get(
                               op.getNameAttr(),
                               boundInstance.getInnerSymAttr().getSymName()));
      });
    }
  }
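  // Randomized initialization of the memory contents and the pipeline
  // registers, guarded by `ifdef ENABLE_INITIAL_MEM_. Skipped entirely when
  // both kinds of randomization are disabled.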
  if (disableMemRandomization && disableRegRandomization)
    return;

  constexpr unsigned randomWidth = 32;
  sv::IfDefOp::create(b, "ENABLE_INITIAL_MEM_", [&]() {
    SmallVector<sv::RegOp> randRegs;
    if (!disableRegRandomization) {
      sv::IfDefOp::create(b, "RANDOMIZE_REG_INIT", [&]() {
        // One 32-bit `_RANDOM register per 32 bits of pipeline register state.
        signed totalWidth = 0;
        for (sv::RegOp &reg : registers)
          totalWidth += reg.getElementType().getIntOrFloatBitWidth();
        while (totalWidth > 0) {
          auto name = b.getStringAttr(moduleNamespace.newName("_RANDOM"));
          auto innerSym = hw::InnerSymAttr::get(name);
          randRegs.push_back(sv::RegOp::create(b, b.getIntegerType(randomWidth),
                                               name, innerSym));
          totalWidth -= randomWidth;
        }
      });
    }
    auto randomMemReg = sv::RegOp::create(
        b,
        b.getIntegerType(llvm::divideCeil(mem.dataWidth, randomWidth) *
                         randomWidth),
        b.getStringAttr("_RANDOM_MEM"));
    sv::InitialOp::create(b, [&]() {
      sv::VerbatimOp::create(b, "`INIT_RANDOM_PROLOG_");
      // Memory randomization: for every slot, fill `_RANDOM_MEM 32 bits at a
      // time from `RANDOM, then blocking-assign a data-width slice of it into
      // the slot.
      if (!disableMemRandomization) {
        sv::IfDefProceduralOp::create(b, "RANDOMIZE_MEM_INIT", [&]() {
          auto outerLoopIndVarType =
              b.getIntegerType(llvm::Log2_64_Ceil(mem.depth + 1));
          auto innerUpperBoundWidth =
              cast<IntegerType>(randomMemReg.getType().getElementType())
                  .getWidth();
          auto innerLoopIndVarType =
              b.getIntegerType(llvm::Log2_64_Ceil(innerUpperBoundWidth + 1));
          sv::ForOp::create(
              b, 0, mem.depth, 1, outerLoopIndVarType, "i",
              [&](BlockArgument outerIndVar) {
                sv::ForOp::create(
                    b, 0, innerUpperBoundWidth, randomWidth,
                    innerLoopIndVarType, "j", [&](BlockArgument innerIndVar) {
                      auto rhs = sv::MacroRefExprSEOp::create(
                          b, b.getIntegerType(randomWidth), "RANDOM");
                      Value truncInnerIndVar;
                      if (mem.dataWidth <= 1)
                        truncInnerIndVar =
                            hw::ConstantOp::create(b, b.getI1Type(), 0);
                      else
                        truncInnerIndVar = b.createOrFold<comb::ExtractOp>(
                            innerIndVar, 0, llvm::Log2_64_Ceil(mem.dataWidth));
                      auto lhs = sv::IndexedPartSelectInOutOp::create(
                          b, randomMemReg, truncInnerIndVar, randomWidth,
                          false);
                      sv::BPAssignOp::create(b, lhs, rhs);
                    });
                Value iterValue = outerIndVar;
                // Truncate the induction variable to the memory address width.
                if (!outerIndVar.getType().isInteger(
                        llvm::Log2_64_Ceil(mem.depth)))
                  iterValue = b.createOrFold<comb::ExtractOp>(
                      iterValue, 0, llvm::Log2_64_Ceil(mem.depth));
                auto lhs = sv::ArrayIndexInOutOp::create(b, reg, iterValue);
                auto rhs = b.createOrFold<comb::ExtractOp>(
                    sv::ReadInOutOp::create(b, randomMemReg), 0, mem.dataWidth);
                sv::BPAssignOp::create(b, lhs, rhs);
              });
        });
      }
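      // Register randomization: assign every pipeline register from slices of
      // the shared `_RANDOM registers under `ifdef RANDOMIZE_REG_INIT.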
      if (!disableRegRandomization) {
        sv::IfDefProceduralOp::create(b, "RANDOMIZE_REG_INIT", [&]() {
          // Seed every `_RANDOM register from `RANDOM.
          unsigned bits = randomWidth;
          for (sv::RegOp reg : randRegs)
            sv::VerbatimOp::create(
                b, b.getStringAttr("{{0}} = {`RANDOM};"), ValueRange{},
                b.getArrayAttr(hw::InnerRefAttr::get(op.getNameAttr(),
                                                     reg.getInnerNameAttr())));

          sv::RegOp randReg;
          unsigned randRegIdx = 0;
          for (sv::RegOp &reg : registers) {
            SmallVector<std::pair<Attribute, std::pair<size_t, size_t>>> values;
            auto width = reg.getElementType().getIntOrFloatBitWidth();
            auto widthRemaining = width;
            while (widthRemaining > 0) {
              if (bits == randomWidth) {
                randReg = randRegs[randRegIdx++];
                bits = 0;
              }
              auto innerRef = hw::InnerRefAttr::get(op.getNameAttr(),
                                                    randReg.getInnerNameAttr());
              if (widthRemaining <= randomWidth - bits) {
                values.push_back({innerRef, {bits + widthRemaining - 1, bits}});
                bits += widthRemaining;
                widthRemaining = 0;
              } else {
                values.push_back({innerRef, {randomWidth - 1, bits}});
                widthRemaining -= (randomWidth - bits);
                bits = randomWidth;
              }
            }
            SmallString<32> rhs("{{0}} = ");
            unsigned idx = 1;
            SmallVector<Attribute, 4> symbols({hw::InnerRefAttr::get(
                op.getNameAttr(), reg.getInnerNameAttr())});
            if (values.size() > 1)
              rhs.append("{");
            for (auto &v : values) {
              if (idx > 1)
                rhs.append(", ");
              auto [sym, range] = v;
              symbols.push_back(sym);
              rhs.append(("{{" + Twine(idx++) + "}}").str());
              // Omit the bit range when the whole random register is used.
              if (range.first == randomWidth - 1 && range.second == 0)
                continue;
              if (range.first == range.second) {
                rhs.append(("[" + Twine(range.first) + "]").str());
              } else {
                rhs.append(
                    ("[" + Twine(range.first) + ":" + Twine(range.second) + "]")
                        .str());
              }
            }
            if (values.size() > 1)
              rhs.append("}");
            rhs.append(";");
            sv::VerbatimOp::create(b, rhs, ValueRange{},
                                   b.getArrayAttr(symbols));
          }
        });
      }
    });
  });
}
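// Replace each generated FIRRTL memory module either with an extern module
// (when replSeqMem applies to it) or with a concrete implementation produced
// by HWMemSimImpl::generateMemory.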
void HWMemSimImplPass::runOnOperation() {
  auto topModule = getOperation();

  SymbolCache symbolCache;
  symbolCache.addDefinitions(topModule);
  Namespace mlirModuleNamespace;
  mlirModuleNamespace.add(symbolCache);

  SmallVector<HWModuleGeneratedOp> toErase;
  bool anythingChanged = false;

  for (auto op :
       llvm::make_early_inc_range(topModule.getOps<HWModuleGeneratedOp>())) {
    auto oldModule = cast<HWModuleGeneratedOp>(op);
    auto gen = oldModule.getGeneratorKind();
    auto genOp = cast<HWGeneratorSchemaOp>(
        SymbolTable::lookupSymbolIn(getOperation(), gen));

    if (genOp.getDescriptor() == "FIRRTL_Memory") {
      FirMemory mem(oldModule);

      OpBuilder builder(oldModule);
      auto nameAttr = builder.getStringAttr(oldModule.getName());

      // When replSeqMem is requested, single-cycle memories become extern
      // modules so an external implementation can be provided; everything else
      // gets a generated implementation.
      if (replSeqMem && ((mem.readLatency == 1 && mem.writeLatency == 1) &&
                         mem.dataWidth > 0)) {
        HWModuleExternOp::create(builder, oldModule.getLoc(), nameAttr,
                                 oldModule.getPortList());
      } else {
        auto newModule = HWModuleOp::create(builder, oldModule.getLoc(),
                                            nameAttr, oldModule.getPortList());
        if (auto outdir = oldModule->getAttr("output_file"))
          newModule->setAttr("output_file", outdir);
        newModule.setCommentAttr(
            builder.getStringAttr("VCS coverage exclude_file"));
        newModule.setPrivate();

        HWMemSimImpl(readEnableMode, addMuxPragmas, disableMemRandomization,
                     disableRegRandomization,
                     addVivadoRAMAddressConflictSynthesisBugWorkaround,
                     mlirModuleNamespace)
            .generateMemory(newModule, mem);
        if (auto fragments = oldModule->getAttr(emit::getFragmentsAttrName()))
          newModule->setAttr(emit::getFragmentsAttrName(), fragments);
      }

      anythingChanged = true;
    }
  }

  if (!anythingChanged)
    markAllAnalysesPreserved();
}
std::unique_ptr<mlir::Pass>
circt::seq::createHWMemSimImplPass(const HWMemSimImplOptions &options) {
  return std::make_unique<HWMemSimImplPass>(options);
}