CIRCT 22.0.0git
HWMemSimImpl.cpp
1//===- HWMemSimImpl.cpp - HW Memory Implementation Pass -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This transformation pass converts generated FIRRTL memory modules to
10// simulation models.
11//
12//===----------------------------------------------------------------------===//
13
24#include "mlir/IR/ImplicitLocOpBuilder.h"
25#include "mlir/Pass/Pass.h"
26#include "llvm/ADT/TypeSwitch.h"
27#include "llvm/Support/Path.h"
28
29using namespace circt;
30using namespace hw;
31using namespace seq;
32
33namespace circt {
34namespace seq {
35#define GEN_PASS_DEF_HWMEMSIMIMPL
36#include "circt/Dialect/Seq/SeqPasses.h.inc"
37} // namespace seq
38} // namespace circt
39
40//===----------------------------------------------------------------------===//
41// HWMemSimImplPass Pass
42//===----------------------------------------------------------------------===//
43
44namespace {
45
46class HWMemSimImpl {
47 ReadEnableMode readEnableMode;
48 bool addMuxPragmas;
49 bool disableMemRandomization;
50 bool disableRegRandomization;
51 bool addVivadoRAMAddressConflictSynthesisBugWorkaround;
52
53 SmallVector<sv::RegOp> registers;
54
55 Value addPipelineStages(ImplicitLocOpBuilder &b,
56 hw::InnerSymbolNamespace &moduleNamespace,
57 size_t stages, Value clock, Value data,
58 const Twine &name, Value gate = {});
59 sv::AlwaysOp lastPipelineAlwaysOp;
60
61public:
62 Namespace &mlirModuleNamespace;
63
64 HWMemSimImpl(ReadEnableMode readEnableMode, bool addMuxPragmas,
65 bool disableMemRandomization, bool disableRegRandomization,
66 bool addVivadoRAMAddressConflictSynthesisBugWorkaround,
67 Namespace &mlirModuleNamespace)
68 : readEnableMode(readEnableMode), addMuxPragmas(addMuxPragmas),
69 disableMemRandomization(disableMemRandomization),
70 disableRegRandomization(disableRegRandomization),
71 addVivadoRAMAddressConflictSynthesisBugWorkaround(
72 addVivadoRAMAddressConflictSynthesisBugWorkaround),
73 mlirModuleNamespace(mlirModuleNamespace) {}
74
75 void generateMemory(HWModuleOp op, FirMemory mem);
76};
77
78struct HWMemSimImplPass : public impl::HWMemSimImplBase<HWMemSimImplPass> {
79 using HWMemSimImplBase::HWMemSimImplBase;
80
81 void runOnOperation() override;
82};
83
84} // end anonymous namespace
85
86/// A helper that returns true if a value definition (or block argument) is
87/// visible to another operation, either because it's a block argument or
88/// because the defining op is before that other op.
89static bool valueDefinedBeforeOp(Value value, Operation *op) {
90 Operation *valueOp = value.getDefiningOp();
91 Block *valueBlock =
92 valueOp ? valueOp->getBlock() : cast<BlockArgument>(value).getOwner();
93 while (op->getBlock() && op->getBlock() != valueBlock)
94 op = op->getParentOp();
95 return valueBlock == op->getBlock() &&
96 (!valueOp || valueOp->isBeforeInBlock(op));
97}
98
99//
100// Construct a memory read annotated with mux pragmas in the following
101// form:
102// ```
103// wire GEN;
104// /* synopsys infer_mux_override */
105// assign GEN = memory[addr] /* cadence map_to_mux */;
106// ```
107// If `addMuxPragmas` is not enabled (or the memory has at most one
108// element), just return the read value without annotations.
109static Value getMemoryRead(ImplicitLocOpBuilder &b, Value memory, Value addr,
110 bool addMuxPragmas) {
111 auto slot = sv::ReadInOutOp::create(
112 b, sv::ArrayIndexInOutOp::create(b, memory, addr));
113 // If we don't want to add mux pragmas, just return the read value.
114 if (!addMuxPragmas ||
115 cast<hw::UnpackedArrayType>(
116 cast<hw::InOutType>(memory.getType()).getElementType())
117 .getNumElements() <= 1)
118 return slot;
119 sv::setSVAttributes(
120 slot, sv::SVAttributeAttr::get(b.getContext(), "cadence map_to_mux",
121 /*emitAsComment=*/true));
122 auto valWire = sv::WireOp::create(b, slot.getType());
123 auto assignOp = sv::AssignOp::create(b, valWire, slot);
124 sv::setSVAttributes(assignOp,
125 sv::SVAttributeAttr::get(b.getContext(),
126 "synopsys infer_mux_override",
127 /*emitAsComment=*/true));
128
129 return sv::ReadInOutOp::create(b, valWire);
130}
131
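// Pipeline `data` through `stages` registers on the positive edge of `clock`,
// reusing the previously created always block when the clock matches and the
// operands dominate it. An optional `gate` gates only the first stage. As a
// rough sketch (widths and names are illustrative), two gated stages for a
// value named "R0_addr" lower to:
// ```
// reg [W-1:0] _R0_addr_d0;
// reg [W-1:0] _R0_addr_d1;
// always @(posedge clock) begin
//   if (gate)
//     _R0_addr_d0 <= data;
//   _R0_addr_d1 <= _R0_addr_d0;
// end
// ```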
132Value HWMemSimImpl::addPipelineStages(ImplicitLocOpBuilder &b,
133 hw::InnerSymbolNamespace &moduleNamespace,
134 size_t stages, Value clock, Value data,
135 const Twine &name, Value gate) {
136 if (!stages)
137 return data;
138
139 // Try to reuse the previous always block. This is only possible if the clocks
140 // agree and the data and gate all dominate the always block.
141 auto alwaysOp = lastPipelineAlwaysOp;
142 if (alwaysOp) {
143 if (alwaysOp.getClocks() != ValueRange{clock} ||
144 !valueDefinedBeforeOp(data, alwaysOp) ||
145 (gate && !valueDefinedBeforeOp(gate, alwaysOp)))
146 alwaysOp = {};
147 }
148 if (!alwaysOp)
149 alwaysOp = sv::AlwaysOp::create(b, sv::EventControl::AtPosEdge, clock);
150
151 // Add the necessary registers.
152 auto savedIP = b.saveInsertionPoint();
153 SmallVector<sv::RegOp> regs;
154 b.setInsertionPoint(alwaysOp);
155 for (unsigned i = 0; i < stages; ++i) {
156 auto regName =
157 b.getStringAttr(moduleNamespace.newName("_" + name + "_d" + Twine(i)));
158 auto reg = sv::RegOp::create(b, data.getType(), regName,
159 hw::InnerSymAttr::get(regName));
160 regs.push_back(reg);
161 registers.push_back(reg);
162 }
163
164 // Populate the assignments in the always block.
165 b.setInsertionPointToEnd(alwaysOp.getBodyBlock());
166 for (unsigned i = 0; i < stages; ++i) {
167 if (i > 0)
168 data = sv::ReadInOutOp::create(b, data);
169 auto emitAssign = [&] { sv::PAssignOp::create(b, regs[i], data); };
170 if (gate)
171 sv::IfOp::create(b, gate, [&]() { emitAssign(); });
172 else
173 emitAssign();
174 data = regs[i];
175 gate = {};
176 }
177 b.restoreInsertionPoint(savedIP);
178 data = sv::ReadInOutOp::create(b, data);
179
180 lastPipelineAlwaysOp = alwaysOp;
181 return data;
182}
183
184void HWMemSimImpl::generateMemory(HWModuleOp op, FirMemory mem) {
185 ImplicitLocOpBuilder b(op.getLoc(), op.getBody());
186
187 InnerSymbolNamespace moduleNamespace(op);
188
189 // Compute total number of mask bits.
190 if (mem.maskGran == 0)
191 mem.maskGran = mem.dataWidth;
192 auto maskBits = mem.dataWidth / mem.maskGran;
193 bool isMasked = maskBits > 1;
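 // For example, dataWidth = 32 with maskGran = 8 yields maskBits = 4 (one
 // mask bit per byte), so the memory is treated as masked.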
194 // Each mask bit controls a mask-granularity-wide slice of the data bits.
195 auto dataType = b.getIntegerType(mem.dataWidth);
196
197 // Count the total number of ports.
198 unsigned numPorts =
199 mem.numReadPorts + mem.numWritePorts + mem.numReadWritePorts;
200
201 // Create the register that models the memory storage.
202 sv::RegOp reg =
203 sv::RegOp::create(b, UnpackedArrayType::get(dataType, mem.depth),
204 b.getStringAttr("Memory"));
205
206 if (addVivadoRAMAddressConflictSynthesisBugWorkaround) {
207 if (mem.readLatency == 0) {
208 // If the read latency is zero, we regard the memory as write-first.
209 // We add an SV attribute telling Vivado to use a LUT-based ("distributed")
210 // RAM style, to avoid a bug that miscompiles write-first memories. See the
211 // "RAM address conflict and Vivado synthesis bug" issue on the Vivado forum
212 // for more detail.
213 sv::setSVAttributes(
214 reg, sv::SVAttributeAttr::get(b.getContext(), "ram_style",
215 R"("distributed")",
216 /*emitAsComment=*/false));
217 } else if (mem.readLatency == 1 && numPorts > 1) {
218 // If the read address is registered and the RAM has multiple ports,
219 // force write-first behaviour by setting rw_addr_collision. This avoids
220 // unpredictable behaviour. Downstream flows should watch for `VPL
221 // 8-6430`.
222 sv::setSVAttributes(
223 reg, sv::SVAttributeAttr::get(b.getContext(), "rw_addr_collision",
224 R"("yes")", /*emitAsComment=*/false));
225 }
226 }
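 // As a rough sketch (declaration shape is illustrative), the attribute added
 // above ends up attached to the memory declaration in the emitted SV, e.g.
 // for the zero-latency case:
 // ```
 // (* ram_style = "distributed" *)
 // reg [dataWidth-1:0] Memory[0:depth-1];
 // ```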
227
228 SmallVector<Value, 4> outputs;
229
230 size_t inArg = 0;
231 for (size_t i = 0; i < mem.numReadPorts; ++i) {
232 Value addr = op.getBody().getArgument(inArg++);
233 Value en = op.getBody().getArgument(inArg++);
234 Value clock = op.getBody().getArgument(inArg++);
235 // Add pipeline stages
236 if (readEnableMode == ReadEnableMode::Ignore) {
237 for (size_t j = 0, e = mem.readLatency; j != e; ++j) {
238 auto enLast = en;
239 if (j < e - 1)
240 en = addPipelineStages(b, moduleNamespace, 1, clock, en,
241 "R" + Twine(i) + "_en");
242 addr = addPipelineStages(b, moduleNamespace, 1, clock, addr,
243 "R" + Twine(i) + "_addr", enLast);
244 }
245 } else {
246 en = addPipelineStages(b, moduleNamespace, mem.readLatency, clock, en,
247 "R" + Twine(i) + "_en");
248 addr = addPipelineStages(b, moduleNamespace, mem.readLatency, clock, addr,
249 "R" + Twine(i) + "_addr");
250 }
251
252 // Read Logic
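 // Depending on readEnableMode, the read value is either used directly or
 // muxed against a constant when the (pipelined) enable is low. Roughly, in
 // the emitted SV (names are illustrative):
 // ```
 // Ignore:    assign R0_data = Memory[_R0_addr_d0];
 // Undefined: assign R0_data = _R0_en_d0 ? Memory[_R0_addr_d0] : {W{1'bx}};
 // Zero:      assign R0_data = _R0_en_d0 ? Memory[_R0_addr_d0] : {W{1'b0}};
 // ```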
253 Value rdata = getMemoryRead(b, reg, addr, addMuxPragmas);
254 switch (readEnableMode) {
255 case ReadEnableMode::Undefined: {
256 Value x = sv::ConstantXOp::create(b, rdata.getType());
257 rdata = comb::MuxOp::create(b, en, rdata, x, false);
258 break;
259 }
260 case ReadEnableMode::Zero: {
261 Value x = hw::ConstantOp::create(b, rdata.getType(), 0);
262 rdata = comb::MuxOp::create(b, en, rdata, x, false);
263 break;
264 }
265 case ReadEnableMode::Ignore:
266 break;
267 }
268 outputs.push_back(rdata);
269 }
270
271 for (size_t i = 0; i < mem.numReadWritePorts; ++i) {
272 auto numReadStages = mem.readLatency;
273 auto numWriteStages = mem.writeLatency - 1;
274 auto numCommonStages = std::min(numReadStages, numWriteStages);
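 // For example, readLatency = 1 and writeLatency = 3 give numReadStages = 1,
 // numWriteStages = 2, and numCommonStages = 1 stage shared by the address,
 // enable, and write-mode signals.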
275 Value addr = op.getBody().getArgument(inArg++);
276 Value en = op.getBody().getArgument(inArg++);
277 Value clock = op.getBody().getArgument(inArg++);
278 Value wmode = op.getBody().getArgument(inArg++);
279 Value wdataIn = op.getBody().getArgument(inArg++);
280 Value wmaskBits;
281 // There are no input mask ports if maskBits == 1. Create a dummy true value
282 // for the mask.
283 if (isMasked)
284 wmaskBits = op.getBody().getArgument(inArg++);
285 else
286 wmaskBits = ConstantOp::create(b, b.getIntegerAttr(en.getType(), 1));
287
288 // Add common pipeline stages.
289 addr = addPipelineStages(b, moduleNamespace, numCommonStages, clock, addr,
290 "RW" + Twine(i) + "_addr");
291 en = addPipelineStages(b, moduleNamespace, numCommonStages, clock, en,
292 "RW" + Twine(i) + "_en");
293 wmode = addPipelineStages(b, moduleNamespace, numCommonStages, clock, wmode,
294 "RW" + Twine(i) + "_mode");
295
296 // Add read-only pipeline stages.
297 Value readAddr = addr;
298 Value readEn = en;
299 if (readEnableMode == ReadEnableMode::Ignore) {
300 for (size_t j = 0, e = mem.readLatency; j != e; ++j) {
301 auto enLast = en;
302 if (j < e - 1)
303 readEn = addPipelineStages(b, moduleNamespace, 1, clock, en,
304 "RW" + Twine(i) + "_ren");
305 readAddr = addPipelineStages(b, moduleNamespace, 1, clock, addr,
306 "RW" + Twine(i) + "_raddr", enLast);
307 }
308 } else {
309 readAddr =
310 addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
311 clock, addr, "RW" + Twine(i) + "_raddr");
312 readEn =
313 addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
314 clock, en, "RW" + Twine(i) + "_ren");
315 }
316 auto readWMode =
317 addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
318 clock, wmode, "RW" + Twine(i) + "_rmode");
319
320 // Add write-only pipeline stages.
321 auto writeAddr =
322 addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
323 clock, addr, "RW" + Twine(i) + "_waddr");
324 auto writeEn =
325 addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
326 clock, en, "RW" + Twine(i) + "_wen");
327 auto writeWMode =
328 addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
329 clock, wmode, "RW" + Twine(i) + "_wmode");
330 wdataIn = addPipelineStages(b, moduleNamespace, numWriteStages, clock,
331 wdataIn, "RW" + Twine(i) + "_wdata");
332 if (isMasked)
333 wmaskBits = addPipelineStages(b, moduleNamespace, numWriteStages, clock,
334 wmaskBits, "RW" + Twine(i) + "_wmask");
335
336 SmallVector<Value, 4> maskValues(maskBits);
337 SmallVector<Value, 4> dataValues(maskBits);
338 // For a multi-bit mask, extract the corresponding write data slices of
339 // mask-granularity size each. Each extracted slice will be
340 // written to the register, guarded by the corresponding mask bit.
341 for (size_t i = 0; i < maskBits; ++i) {
342 maskValues[i] = b.createOrFold<comb::ExtractOp>(wmaskBits, i, 1);
343 dataValues[i] = b.createOrFold<comb::ExtractOp>(wdataIn, i * mem.maskGran,
344 mem.maskGran);
345 }
346
347 // Wire to store the read result.
348 auto rWire = sv::WireOp::create(b, wdataIn.getType());
349 Value rdata = sv::ReadInOutOp::create(b, rWire);
350
351 // Read logic.
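 // The port reads only when it is enabled and not in write mode, i.e.
 // rcond = readEn & (readWMode == 0).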
352 Value rcond = b.createOrFold<comb::AndOp>(
353 readEn,
354 b.createOrFold<comb::ICmpOp>(
355 comb::ICmpPredicate::eq, readWMode,
356 b.createOrFold<ConstantOp>(readWMode.getType(), 0), false),
357 false);
358
359 auto val = getMemoryRead(b, reg, readAddr, addMuxPragmas);
360
361 switch (readEnableMode) {
362 case ReadEnableMode::Undefined: {
363 Value x = sv::ConstantXOp::create(b, val.getType());
364 val = comb::MuxOp::create(b, rcond, val, x, false);
365 break;
366 }
367 case ReadEnableMode::Zero: {
368 Value x = hw::ConstantOp::create(b, val.getType(), 0);
369 val = comb::MuxOp::create(b, rcond, val, x, false);
370 break;
371 }
372 case ReadEnableMode::Ignore:
373 break;
374 }
375 sv::AssignOp::create(b, rWire, val);
376
377 // Write logic guarded by the corresponding mask bit.
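 // Roughly, for mask bit k this produces (names are illustrative):
 // ```
 // always @(posedge clock)
 //   if (RW0_wen & wmask[k] & RW0_wmode)
 //     Memory[RW0_waddr][k*maskGran +: maskGran]
 //       <= RW0_wdata[k*maskGran +: maskGran];
 // ```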
378 for (auto wmask : llvm::enumerate(maskValues)) {
379 sv::AlwaysOp::create(b, sv::EventControl::AtPosEdge, clock, [&]() {
380 auto wcond = b.createOrFold<comb::AndOp>(
381 writeEn,
382 b.createOrFold<comb::AndOp>(wmask.value(), writeWMode, false),
383 false);
384 sv::IfOp::create(b, wcond, [&]() {
385 Value slotReg = sv::ArrayIndexInOutOp::create(b, reg, writeAddr);
386 sv::PAssignOp::create(
387 b,
388 b.createOrFold<sv::IndexedPartSelectInOutOp>(
389 slotReg,
390 b.createOrFold<ConstantOp>(b.getIntegerType(32),
391 wmask.index() * mem.maskGran),
392 mem.maskGran),
393 dataValues[wmask.index()]);
394 });
395 });
396 }
397 outputs.push_back(rdata);
398 }
399
400 DenseMap<unsigned, Operation *> writeProcesses;
401 for (size_t i = 0; i < mem.numWritePorts; ++i) {
402 auto numStages = mem.writeLatency - 1;
403 Value addr = op.getBody().getArgument(inArg++);
404 Value en = op.getBody().getArgument(inArg++);
405 Value clock = op.getBody().getArgument(inArg++);
406 Value wdataIn = op.getBody().getArgument(inArg++);
407 Value wmaskBits;
408 // There are no input mask ports if maskBits == 1. Create a dummy true value
409 // for the mask.
410 if (isMasked)
411 wmaskBits = op.getBody().getArgument(inArg++);
412 else
413 wmaskBits = ConstantOp::create(b, b.getIntegerAttr(en.getType(), 1));
414 // Add pipeline stages
415 addr = addPipelineStages(b, moduleNamespace, numStages, clock, addr,
416 "W" + Twine(i) + "addr");
417 en = addPipelineStages(b, moduleNamespace, numStages, clock, en,
418 "W" + Twine(i) + "en");
419 wdataIn = addPipelineStages(b, moduleNamespace, numStages, clock, wdataIn,
420 "W" + Twine(i) + "data");
421 if (isMasked)
422 wmaskBits = addPipelineStages(b, moduleNamespace, numStages, clock,
423 wmaskBits, "W" + Twine(i) + "mask");
424
425 SmallVector<Value, 4> maskValues(maskBits);
426 SmallVector<Value, 4> dataValues(maskBits);
427 // For a multi-bit mask, extract the corresponding write data slices of
428 // mask-granularity size each. Each extracted slice will be
429 // written to the register, guarded by the corresponding mask bit.
430 for (size_t i = 0; i < maskBits; ++i) {
431 maskValues[i] = b.createOrFold<comb::ExtractOp>(wmaskBits, i, 1);
432 dataValues[i] = b.createOrFold<comb::ExtractOp>(wdataIn, i * mem.maskGran,
433 mem.maskGran);
434 }
435 // Build write port logic.
436 auto writeLogic = [&] {
437 // For each register, create the connections to write the corresponding
438 // data into it.
439 for (auto wmask : llvm::enumerate(maskValues)) {
440 // Guard by corresponding mask bit.
441 auto wcond = b.createOrFold<comb::AndOp>(en, wmask.value(), false);
442 sv::IfOp::create(b, wcond, [&]() {
443 auto slot = sv::ArrayIndexInOutOp::create(b, reg, addr);
444 sv::PAssignOp::create(
445 b,
446 b.createOrFold<sv::IndexedPartSelectInOutOp>(
447 slot,
448 b.createOrFold<ConstantOp>(b.getIntegerType(32),
449 wmask.index() * mem.maskGran),
450 mem.maskGran),
451 dataValues[wmask.index()]);
452 });
453 }
454 };
455
456 // Build a new always block with write port logic.
457 auto alwaysBlock = [&] {
458 return sv::AlwaysOp::create(b, sv::EventControl::AtPosEdge, clock,
459 [&]() { writeLogic(); });
460 };
461
462 switch (mem.writeUnderWrite) {
463 // Undefined write order: lower each write port into a separate always
464 // block.
465 case seq::WUW::Undefined:
466 alwaysBlock();
467 break;
468 // Port-ordered write order: lower each write port into an always block
469 // based on its clock ID.
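 // Ports that share a clock ID append to the same always block, so their
 // assignments execute in port order and, under SV scheduling, the last
 // write to a colliding address wins.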
470 case seq::WUW::PortOrder:
471 if (auto *existingAlwaysBlock =
472 writeProcesses.lookup(mem.writeClockIDs[i])) {
473 OpBuilder::InsertionGuard guard(b);
474 b.setInsertionPointToEnd(
475 cast<sv::AlwaysOp>(existingAlwaysBlock).getBodyBlock());
476 writeLogic();
477 } else {
478 writeProcesses[i] = alwaysBlock();
479 }
480 }
481 }
482
483 auto *outputOp = op.getBodyBlock()->getTerminator();
484 outputOp->setOperands(outputs);
485
486 // Add logic to initialize the memory based on a file emission request. This
487 // disables randomization.
488 if (!mem.initFilename.empty()) {
489 // Set an inner symbol on the register if one does not exist.
490 if (!reg.getInnerSymAttr())
491 reg.setInnerSymAttr(hw::InnerSymAttr::get(
492 b.getStringAttr(moduleNamespace.newName(reg.getName()))));
493
494 if (mem.initIsInline) {
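 // Emitted roughly as (file name is illustrative; $readmemb is used when
 // the file is binary):
 // ```
 // `ifdef ENABLE_INITIAL_MEM_
 //   initial
 //     $readmemh("init.hex", Memory);
 // `endif
 // ```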
495 sv::IfDefOp::create(b, "ENABLE_INITIAL_MEM_", [&]() {
496 sv::InitialOp::create(b, [&]() {
497 sv::ReadMemOp::create(b, reg, mem.initFilename,
498 mem.initIsBinary ? MemBaseTypeAttr::MemBaseBin
499 : MemBaseTypeAttr::MemBaseHex);
500 });
501 });
502 } else {
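 // For non-inline initialization, build a separate module that initializes
 // the memory through a hierarchical reference and bind it into this module
 // from its own output file. Roughly (module and file names are illustrative):
 // ```
 // module MyMem_init();
 //   initial
 //     $readmemh("init.hex", MyMem.Memory);
 // endmodule
 // bind MyMem MyMem_init MyMem_init();
 // ```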
503 OpBuilder::InsertionGuard guard(b);
504
505 // Assign a name to the bound module.
506 StringAttr boundModuleName =
507 b.getStringAttr(mlirModuleNamespace.newName(op.getName() + "_init"));
508
509 // Generate a name for the file containing the bound module and the bind.
510 StringAttr filename;
511 if (auto fileAttr = op->getAttrOfType<OutputFileAttr>("output_file")) {
512 if (!fileAttr.isDirectory()) {
513 SmallString<128> path(fileAttr.getFilename().getValue());
514 llvm::sys::path::remove_filename(path);
515 llvm::sys::path::append(path, boundModuleName.getValue() + ".sv");
516 filename = b.getStringAttr(path);
517 } else {
518 filename = fileAttr.getFilename();
519 }
520 } else {
521 filename = b.getStringAttr(boundModuleName.getValue() + ".sv");
522 }
523
524 // Create a new module with the readmem op.
525 b.setInsertionPointAfter(op);
526 auto boundModule =
527 HWModuleOp::create(b, boundModuleName, ArrayRef<PortInfo>());
528
529 // Build the hw::HierPathOp referring to the memory register.
530 auto path = hw::HierPathOp::create(
531 b, mlirModuleNamespace.newName(op.getName() + "_path"),
532 b.getArrayAttr(
533 ::InnerRefAttr::get(op.getNameAttr(), reg.getInnerNameAttr())));
534
535 b.setInsertionPointToStart(boundModule.getBodyBlock());
536 sv::InitialOp::create(b, [&]() {
537 auto xmr =
538 sv::XMRRefOp::create(b, reg.getType(), path.getSymNameAttr());
539 sv::ReadMemOp::create(b, xmr, mem.initFilename,
540 mem.initIsBinary ? MemBaseTypeAttr::MemBaseBin
541 : MemBaseTypeAttr::MemBaseHex);
542 });
543
544 // Instantiate this new module inside the memory module.
545 b.setInsertionPointAfter(reg);
546 auto boundInstance = hw::InstanceOp::create(
547 b, boundModule, boundModule.getName(), ArrayRef<Value>());
548 boundInstance->setAttr(
549 "inner_sym",
550 hw::InnerSymAttr::get(b.getStringAttr(
551 moduleNamespace.newName(boundInstance.getInstanceName()))));
552 boundInstance.setDoNotPrintAttr(b.getUnitAttr());
553
554 // Build the file container and reference the module from it.
555 b.setInsertionPointAfter(op);
556 emit::FileOp::create(b, filename, [&] {
557 emit::RefOp::create(b, FlatSymbolRefAttr::get(boundModuleName));
558 sv::BindOp::create(b,
559 hw::InnerRefAttr::get(
560 op.getNameAttr(),
561 boundInstance.getInnerSymAttr().getSymName()));
562 });
563 }
564 }
565
566 // Add logic to initialize the memory and any internal registers to random
567 // values.
568 if (disableMemRandomization && disableRegRandomization)
569 return;
570
571 constexpr unsigned randomWidth = 32;
572 sv::IfDefOp::create(b, "ENABLE_INITIAL_MEM_", [&]() {
573 sv::RegOp randReg;
574 SmallVector<sv::RegOp> randRegs;
575 if (!disableRegRandomization) {
576 sv::IfDefOp::create(b, "RANDOMIZE_REG_INIT", [&]() {
577 signed totalWidth = 0;
578 for (sv::RegOp &reg : registers)
579 totalWidth += reg.getElementType().getIntOrFloatBitWidth();
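 // One 32-bit _RANDOM word is allocated per randomWidth bits of the total
 // register width; e.g. three 20-bit pipeline registers (60 bits) need
 // ceil(60 / 32) = 2 words.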
580 while (totalWidth > 0) {
581 auto name = b.getStringAttr(moduleNamespace.newName("_RANDOM"));
582 auto innerSym = hw::InnerSymAttr::get(name);
583 randRegs.push_back(sv::RegOp::create(b, b.getIntegerType(randomWidth),
584 name, innerSym));
585 totalWidth -= randomWidth;
586 }
587 });
588 }
589 auto randomMemReg = sv::RegOp::create(
590 b,
591 b.getIntegerType(llvm::divideCeil(mem.dataWidth, randomWidth) *
592 randomWidth),
593 b.getStringAttr("_RANDOM_MEM"));
594 sv::InitialOp::create(b, [&]() {
595 sv::VerbatimOp::create(b, "`INIT_RANDOM_PROLOG_");
596
597 // Memory randomization logic. The entire memory is randomized.
598 if (!disableMemRandomization) {
599 sv::IfDefProceduralOp::create(b, "RANDOMIZE_MEM_INIT", [&]() {
600 auto outerLoopIndVarType =
601 b.getIntegerType(llvm::Log2_64_Ceil(mem.depth + 1));
602 auto innerUpperBoundWidth =
603 cast<IntegerType>(randomMemReg.getType().getElementType())
604 .getWidth();
605 auto innerLoopIndVarType =
606 b.getIntegerType(llvm::Log2_64_Ceil(innerUpperBoundWidth + 1));
607 // Construct the following nested for loops:
608 // ```
609 // for (int i = 0; i < mem.depth; i++) begin
610 // for (int j = 0; j < randomMem.size; j += 32)
611 // randomMem[j +: 32] = `RANDOM;
612 // Memory[i] = randomMem[mem.dataWidth - 1 : 0];
613 // ```
614 sv::ForOp::create(
615 b, 0, mem.depth, 1, outerLoopIndVarType, "i",
616 [&](BlockArgument outerIndVar) {
617 sv::ForOp::create(
618 b, 0, innerUpperBoundWidth, randomWidth,
619 innerLoopIndVarType, "j", [&](BlockArgument innerIndVar) {
620 auto rhs = sv::MacroRefExprSEOp::create(
621 b, b.getIntegerType(randomWidth), "RANDOM");
622 Value truncInnerIndVar;
623 if (mem.dataWidth <= 1)
624 truncInnerIndVar =
625 hw::ConstantOp::create(b, b.getI1Type(), 0);
626 else
627 truncInnerIndVar = b.createOrFold<comb::ExtractOp>(
628 innerIndVar, 0, llvm::Log2_64_Ceil(mem.dataWidth));
629 auto lhs = sv::IndexedPartSelectInOutOp::create(
630 b, randomMemReg, truncInnerIndVar, randomWidth,
631 false);
632 sv::BPAssignOp::create(b, lhs, rhs);
633 });
634
635 Value iterValue = outerIndVar;
636 // Truncate the induction variable if necessary.
637 if (!outerIndVar.getType().isInteger(
638 llvm::Log2_64_Ceil(mem.depth)))
639 iterValue = b.createOrFold<comb::ExtractOp>(
640 iterValue, 0, llvm::Log2_64_Ceil(mem.depth));
641 auto lhs = sv::ArrayIndexInOutOp::create(b, reg, iterValue);
642 auto rhs = b.createOrFold<comb::ExtractOp>(
643 sv::ReadInOutOp::create(b, randomMemReg), 0, mem.dataWidth);
644 sv::BPAssignOp::create(b, lhs, rhs);
645 });
646 });
647 }
648
649 // Register randomization logic. Randomize every register to a random
650 // value, making efficient use of the available randomization registers.
651 //
652 // TODO: This shares a lot of common logic with LowerToHW. Combine
653 // these two in a common randomization utility.
654 if (!disableRegRandomization) {
655 sv::IfDefProceduralOp::create(b, "RANDOMIZE_REG_INIT", [&]() {
656 unsigned bits = randomWidth;
657 for (sv::RegOp &reg : randRegs)
658 sv::VerbatimOp::create(
659 b, b.getStringAttr("{{0}} = {`RANDOM};"), ValueRange{},
660 b.getArrayAttr(hw::InnerRefAttr::get(op.getNameAttr(),
661 reg.getInnerNameAttr())));
662 auto randRegIdx = 0;
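 // The {{N}} placeholders in these verbatim strings are replaced with the
 // names of the referenced inner symbols when Verilog is emitted. For example
 // (names are illustrative), the loop above yields `_RANDOM = {`RANDOM};` per
 // word, and a 40-bit register spanning two words yields roughly
 // `_myreg_d0 = {_RANDOM, _RANDOM_0[7:0]};`.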
663 for (sv::RegOp &reg : registers) {
664 SmallVector<std::pair<Attribute, std::pair<size_t, size_t>>> values;
665 auto width = reg.getElementType().getIntOrFloatBitWidth();
666 auto widthRemaining = width;
667 while (widthRemaining > 0) {
668 if (bits == randomWidth) {
669 randReg = randRegs[randRegIdx++];
670 bits = 0;
671 }
672 auto innerRef = hw::InnerRefAttr::get(op.getNameAttr(),
673 randReg.getInnerNameAttr());
674 if (widthRemaining <= randomWidth - bits) {
675 values.push_back({innerRef, {bits + widthRemaining - 1, bits}});
676 bits += widthRemaining;
677 widthRemaining = 0;
678 continue;
679 }
680 values.push_back({innerRef, {randomWidth - 1, bits}});
681 widthRemaining -= (randomWidth - bits);
682 bits = randomWidth;
683 }
684 SmallString<32> rhs("{{0}} = ");
685 unsigned idx = 1;
686 assert(reg.getInnerSymAttr());
687 SmallVector<Attribute, 4> symbols({hw::InnerRefAttr::get(
688 op.getNameAttr(), reg.getInnerNameAttr())});
689 if (values.size() > 1)
690 rhs.append("{");
691 for (auto &v : values) {
692 if (idx > 1)
693 rhs.append(", ");
694 auto [sym, range] = v;
695 symbols.push_back(sym);
696 rhs.append(("{{" + Twine(idx++) + "}}").str());
697 // Do not emit a part select as the whole value is used.
698 if (range.first == randomWidth - 1 && range.second == 0)
699 continue;
700 // Emit a single bit part select, e.g., "[3]"
701 if (range.first == range.second) {
702 rhs.append(("[" + Twine(range.first) + "]").str());
703 continue;
704 }
705 // Emit a part select, e.g., "[4:2]"
706 rhs.append(
707 ("[" + Twine(range.first) + ":" + Twine(range.second) + "]")
708 .str());
709 }
710 if (values.size() > 1)
711 rhs.append("}");
712 rhs.append(";");
713 sv::VerbatimOp::create(b, rhs, ValueRange{},
714 b.getArrayAttr(symbols));
715 }
716 });
717 }
718 });
719 });
720}
721
722void HWMemSimImplPass::runOnOperation() {
723 auto topModule = getOperation();
724
725 // Populate a namespace from the symbols visible to the top-level MLIR module.
726 // Memories with initializations create modules and these need to be legal
727 // symbols.
728 SymbolCache symbolCache;
729 symbolCache.addDefinitions(topModule);
730 Namespace mlirModuleNamespace;
731 mlirModuleNamespace.add(symbolCache);
732
733 SmallVector<HWModuleGeneratedOp> toErase;
734 bool anythingChanged = false;
735
736 for (auto op :
737 llvm::make_early_inc_range(topModule.getOps<HWModuleGeneratedOp>())) {
738 auto oldModule = cast<HWModuleGeneratedOp>(op);
739 auto gen = oldModule.getGeneratorKind();
740 auto genOp = cast<HWGeneratorSchemaOp>(
741 SymbolTable::lookupSymbolIn(getOperation(), gen));
742
743 if (genOp.getDescriptor() == "FIRRTL_Memory") {
744 FirMemory mem(oldModule);
745
746 OpBuilder builder(oldModule);
747 auto nameAttr = builder.getStringAttr(oldModule.getName());
748
749 // The requirements for macro replacement:
750 // 1. read latency and write latency of one.
751 // 2. undefined read-under-write behavior.
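 // When these hold (and the data width is non-zero), only an external module
 // declaration is emitted so a downstream flow can substitute its own memory
 // implementation; otherwise a simulation model is generated below.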
752 if (replSeqMem && ((mem.readLatency == 1 && mem.writeLatency == 1) &&
753 mem.dataWidth > 0)) {
754 HWModuleExternOp::create(builder, oldModule.getLoc(), nameAttr,
755 oldModule.getPortList());
756 } else {
757 auto newModule = HWModuleOp::create(builder, oldModule.getLoc(),
758 nameAttr, oldModule.getPortList());
759 if (auto outdir = oldModule->getAttr("output_file"))
760 newModule->setAttr("output_file", outdir);
761 newModule.setCommentAttr(
762 builder.getStringAttr("VCS coverage exclude_file"));
763 newModule.setPrivate();
764
765 HWMemSimImpl(readEnableMode, addMuxPragmas, disableMemRandomization,
766 disableRegRandomization,
767 addVivadoRAMAddressConflictSynthesisBugWorkaround,
768 mlirModuleNamespace)
769 .generateMemory(newModule, mem);
770 if (auto fragments = oldModule->getAttr(emit::getFragmentsAttrName()))
771 newModule->setAttr(emit::getFragmentsAttrName(), fragments);
772 }
773
774 oldModule.erase();
775 anythingChanged = true;
776 }
777 }
778
779 if (!anythingChanged)
780 markAllAnalysesPreserved();
781}
782
783std::unique_ptr<Pass>
784circt::seq::createHWMemSimImplPass(const HWMemSimImplOptions &options) {
785 return std::make_unique<HWMemSimImplPass>(options);
786}