CIRCT  18.0.0git
HWMemSimImpl.cpp
Go to the documentation of this file.
1 //===- HWMemSimImpl.cpp - HW Memory Implementation Pass -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This transformation pass converts generated FIRRTL memory modules to
10 // simulation models.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "PassDetails.h"
17 #include "circt/Dialect/HW/HWOps.h"
20 #include "circt/Dialect/SV/SVOps.h"
23 #include "mlir/IR/ImplicitLocOpBuilder.h"
24 #include "llvm/ADT/TypeSwitch.h"
25 #include "llvm/Support/Path.h"
26 
27 using namespace circt;
28 using namespace hw;
29 using namespace seq;
30 
namespace circt {
namespace seq {
// Pull in the tablegen-generated pass base class (impl::HWMemSimImplBase),
// which supplies the pass options and registration boilerplate.
#define GEN_PASS_DEF_HWMEMSIMIMPL
#include "circt/Dialect/Seq/SeqPasses.h.inc"
} // namespace seq
} // namespace circt
37 
38 //===----------------------------------------------------------------------===//
39 // HWMemSimImplPass Pass
40 //===----------------------------------------------------------------------===//
41 
42 namespace {
43 
/// Helper that lowers a single generated FIRRTL memory module into a
/// behavioral SV simulation model. One instance is created per memory.
class HWMemSimImpl {
  // Lowering configuration; mirrors the pass options passed to the ctor.
  bool ignoreReadEnable;
  bool addMuxPragmas;
  bool disableMemRandomization;
  bool disableRegRandomization;
  bool addVivadoRAMAddressConflictSynthesisBugWorkaround;

  // Every pipeline register created so far; consumed by the register
  // randomization logic emitted at the end of generateMemory.
  SmallVector<sv::RegOp> registers;

  // Pipeline `data` through `stages` registers clocked by `clock`. If `gate`
  // is non-null it conditions the first stage only.
  Value addPipelineStages(ImplicitLocOpBuilder &b,
                          hw::InnerSymbolNamespace &moduleNamespace,
                          size_t stages, Value clock, Value data,
                          const Twine &name, Value gate = {});
  // Most recently created pipeline always block; addPipelineStages reuses it
  // when the clock matches and operands dominate it.
  sv::AlwaysOp lastPipelineAlwaysOp;

public:
  // Namespace of the enclosing top-level MLIR module; used to pick legal names
  // for helper modules and hierarchical paths created during lowering.
  Namespace &mlirModuleNamespace;

  HWMemSimImpl(bool ignoreReadEnable, bool addMuxPragmas,
               bool disableMemRandomization, bool disableRegRandomization,
               bool addVivadoRAMAddressConflictSynthesisBugWorkaround,
               Namespace &mlirModuleNamespace)
      : ignoreReadEnable(ignoreReadEnable), addMuxPragmas(addMuxPragmas),
        disableMemRandomization(disableMemRandomization),
        disableRegRandomization(disableRegRandomization),
        addVivadoRAMAddressConflictSynthesisBugWorkaround(
            addVivadoRAMAddressConflictSynthesisBugWorkaround),
        mlirModuleNamespace(mlirModuleNamespace) {}

  // Emit the simulation model for `mem` into the freshly created module `op`.
  void generateMemory(HWModuleOp op, FirMemory mem);
};
75 
/// The pass driver: scans the top module for generated FIRRTL memory modules
/// and replaces each with a simulation model (or an extern for replSeqMem).
struct HWMemSimImplPass : public impl::HWMemSimImplBase<HWMemSimImplPass> {
  // Inherit the generated constructors, including the options constructor.
  using HWMemSimImplBase::HWMemSimImplBase;

  void runOnOperation() override;
};
81 
82 } // end anonymous namespace
83 
84 /// A helper that returns true if a value definition (or block argument) is
85 /// visible to another operation, either because it's a block argument or
86 /// because the defining op is before that other op.
87 static bool valueDefinedBeforeOp(Value value, Operation *op) {
88  Operation *valueOp = value.getDefiningOp();
89  Block *valueBlock =
90  valueOp ? valueOp->getBlock() : value.cast<BlockArgument>().getOwner();
91  while (op->getBlock() && op->getBlock() != valueBlock)
92  op = op->getParentOp();
93  return valueBlock == op->getBlock() &&
94  (!valueOp || valueOp->isBeforeInBlock(op));
95 }
96 
//
// Construct memory read annotated with mux pragmas in the following
// form:
// ```
// wire GEN;
// /* synopsys infer_mux_override */
// assign GEN = memory[addr] /* cadence map_to_mux */;
// ```
// If `addMuxPragmas` is disabled, just return the read value without
// annotations.
107 static Value getMemoryRead(ImplicitLocOpBuilder &b, Value memory, Value addr,
108  bool addMuxPragmas) {
109  auto slot =
110  b.create<sv::ReadInOutOp>(b.create<sv::ArrayIndexInOutOp>(memory, addr));
111  // If we don't want to add mux pragmas, just return the read value.
112  if (!addMuxPragmas || memory.getType()
113  .cast<hw::InOutType>()
114  .getElementType()
115  .cast<hw::UnpackedArrayType>()
116  .getNumElements() <= 1)
117  return slot;
119  slot, sv::SVAttributeAttr::get(b.getContext(), "cadence map_to_mux",
120  /*emitAsComment=*/true));
121  auto valWire = b.create<sv::WireOp>(slot.getType());
122  auto assignOp = b.create<sv::AssignOp>(valWire, slot);
123  sv::setSVAttributes(assignOp,
124  sv::SVAttributeAttr::get(b.getContext(),
125  "synopsys infer_mux_override",
126  /*emitAsComment=*/true));
127 
128  return b.create<sv::ReadInOutOp>(valWire);
129 }
130 
/// Pipeline `data` through `stages` positive-edge registers on `clock`.
/// If `gate` is non-null, only the first stage is conditioned on it (the gate
/// is cleared after stage 0). Returns the read value of the final stage;
/// with zero stages, returns `data` unchanged.
Value HWMemSimImpl::addPipelineStages(ImplicitLocOpBuilder &b,
                                      hw::InnerSymbolNamespace &moduleNamespace,
                                      size_t stages, Value clock, Value data,
                                      const Twine &name, Value gate) {
  if (!stages)
    return data;

  // Try to reuse the previous always block. This is only possible if the clocks
  // agree and the data and gate all dominate the always block.
  auto alwaysOp = lastPipelineAlwaysOp;
  if (alwaysOp) {
    if (alwaysOp.getClocks() != ValueRange{clock} ||
        !valueDefinedBeforeOp(data, alwaysOp) ||
        (gate && !valueDefinedBeforeOp(gate, alwaysOp)))
      alwaysOp = {};
  }
  if (!alwaysOp)
    alwaysOp = b.create<sv::AlwaysOp>(sv::EventControl::AtPosEdge, clock);

  // Add the necessary registers. They are created *before* the always block so
  // the procedural assignments inside it can refer to them.
  auto savedIP = b.saveInsertionPoint();
  SmallVector<sv::RegOp> regs;
  b.setInsertionPoint(alwaysOp);
  for (unsigned i = 0; i < stages; ++i) {
    auto regName =
        b.getStringAttr(moduleNamespace.newName("_" + name + "_d" + Twine(i)));
    auto reg = b.create<sv::RegOp>(data.getType(), regName,
                                   hw::InnerSymAttr::get(regName));
    regs.push_back(reg);
    // Track for register randomization emitted later in generateMemory.
    registers.push_back(reg);
  }

  // Populate the assignments in the always block.
  b.setInsertionPointToEnd(alwaysOp.getBodyBlock());
  for (unsigned i = 0; i < stages; ++i) {
    // Stage 0 consumes the incoming SSA value directly; later stages read the
    // previous stage's register.
    if (i > 0)
      data = b.create<sv::ReadInOutOp>(data);
    auto emitAssign = [&] { b.create<sv::PAssignOp>(regs[i], data); };
    if (gate)
      b.create<sv::IfOp>(gate, [&]() { emitAssign(); });
    else
      emitAssign();
    data = regs[i];
    // The gate only applies to the first stage.
    gate = {};
  }
  b.restoreInsertionPoint(savedIP);
  // Hand back the read value of the final stage register.
  data = b.create<sv::ReadInOutOp>(data);

  lastPipelineAlwaysOp = alwaysOp;
  return data;
}
182 
183 void HWMemSimImpl::generateMemory(HWModuleOp op, FirMemory mem) {
184  ImplicitLocOpBuilder b(op.getLoc(), op.getBody());
185 
186  InnerSymbolNamespace moduleNamespace(op);
187 
188  // Compute total number of mask bits.
189  if (mem.maskGran == 0)
190  mem.maskGran = mem.dataWidth;
191  auto maskBits = mem.dataWidth / mem.maskGran;
192  bool isMasked = maskBits > 1;
193  // Each mask bit controls mask-granularity number of data bits.
194  auto dataType = b.getIntegerType(mem.dataWidth);
195 
196  // Count the total number of ports.
197  unsigned numPorts =
198  mem.numReadPorts + mem.numWritePorts + mem.numReadWritePorts;
199 
200  // Create registers for the memory.
201  sv::RegOp reg = b.create<sv::RegOp>(
202  UnpackedArrayType::get(dataType, mem.depth), b.getStringAttr("Memory"));
203 
204  if (addVivadoRAMAddressConflictSynthesisBugWorkaround) {
205  if (mem.readLatency == 0) {
206  // If the read latency is zero, we regard the memory as write-first.
207  // We add a SV attribute to specify a ram style to use LUTs for Vivado
208  // to avoid a bug that miscompiles the write-first memory. See "RAM
209  // address conflict and Vivado synthesis bug" issue in the vivado forum
210  // for the more detail.
212  reg, sv::SVAttributeAttr::get(b.getContext(), "ram_style",
213  R"("distributed")",
214  /*emitAsComment=*/false));
215  } else if (mem.readLatency == 1 && numPorts > 1) {
216  // If the read address is registered and the RAM has multiple ports,
217  // force write-first behaviour by setting rw_addr_collision. This avoids
218  // unpredictable behaviour. Downstreams flows should watch for `VPL
219  // 8-6430`.
221  reg, sv::SVAttributeAttr::get(b.getContext(), "rw_addr_collision",
222  R"("yes")", /*emitAsComment=*/false));
223  }
224  }
225 
226  SmallVector<Value, 4> outputs;
227 
228  size_t inArg = 0;
229  for (size_t i = 0; i < mem.numReadPorts; ++i) {
230  Value addr = op.getBody().getArgument(inArg++);
231  Value en = op.getBody().getArgument(inArg++);
232  Value clock = op.getBody().getArgument(inArg++);
233  // Add pipeline stages
234  if (ignoreReadEnable) {
235  for (size_t j = 0, e = mem.readLatency; j != e; ++j) {
236  auto enLast = en;
237  if (j < e - 1)
238  en = addPipelineStages(b, moduleNamespace, 1, clock, en,
239  "R" + Twine(i) + "_en");
240  addr = addPipelineStages(b, moduleNamespace, 1, clock, addr,
241  "R" + Twine(i) + "_addr", enLast);
242  }
243  } else {
244  en = addPipelineStages(b, moduleNamespace, mem.readLatency, clock, en,
245  "R" + Twine(i) + "_en");
246  addr = addPipelineStages(b, moduleNamespace, mem.readLatency, clock, addr,
247  "R" + Twine(i) + "_addr");
248  }
249 
250  // Read Logic
251  Value rdata = getMemoryRead(b, reg, addr, addMuxPragmas);
252  if (!ignoreReadEnable) {
253  Value x = b.create<sv::ConstantXOp>(rdata.getType());
254  rdata = b.create<comb::MuxOp>(en, rdata, x, false);
255  }
256  outputs.push_back(rdata);
257  }
258 
259  for (size_t i = 0; i < mem.numReadWritePorts; ++i) {
260  auto numReadStages = mem.readLatency;
261  auto numWriteStages = mem.writeLatency - 1;
262  auto numCommonStages = std::min(numReadStages, numWriteStages);
263  Value addr = op.getBody().getArgument(inArg++);
264  Value en = op.getBody().getArgument(inArg++);
265  Value clock = op.getBody().getArgument(inArg++);
266  Value wmode = op.getBody().getArgument(inArg++);
267  Value wdataIn = op.getBody().getArgument(inArg++);
268  Value wmaskBits;
269  // There are no input mask ports, if maskBits =1. Create a dummy true value
270  // for mask.
271  if (isMasked)
272  wmaskBits = op.getBody().getArgument(inArg++);
273  else
274  wmaskBits = b.create<ConstantOp>(b.getIntegerAttr(en.getType(), 1));
275 
276  // Add common pipeline stages.
277  addr = addPipelineStages(b, moduleNamespace, numCommonStages, clock, addr,
278  "RW" + Twine(i) + "_addr");
279  en = addPipelineStages(b, moduleNamespace, numCommonStages, clock, en,
280  "RW" + Twine(i) + "_en");
281  wmode = addPipelineStages(b, moduleNamespace, numCommonStages, clock, wmode,
282  "RW" + Twine(i) + "_mode");
283 
284  // Add read-only pipeline stages.
285  Value readAddr = addr;
286  Value readEn = en;
287  if (ignoreReadEnable) {
288  for (size_t j = 0, e = mem.readLatency; j != e; ++j) {
289  auto enLast = en;
290  if (j < e - 1)
291  readEn = addPipelineStages(b, moduleNamespace, 1, clock, en,
292  "RW" + Twine(i) + "_ren");
293  readAddr = addPipelineStages(b, moduleNamespace, 1, clock, addr,
294  "RW" + Twine(i) + "_raddr", enLast);
295  }
296  } else {
297  readAddr =
298  addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
299  clock, addr, "RW" + Twine(i) + "_raddr");
300  readEn =
301  addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
302  clock, en, "RW" + Twine(i) + "_ren");
303  }
304  auto readWMode =
305  addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
306  clock, wmode, "RW" + Twine(i) + "_rmode");
307 
308  // Add write-only pipeline stages.
309  auto writeAddr =
310  addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
311  clock, addr, "RW" + Twine(i) + "_waddr");
312  auto writeEn =
313  addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
314  clock, en, "RW" + Twine(i) + "_wen");
315  auto writeWMode =
316  addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
317  clock, wmode, "RW" + Twine(i) + "_wmode");
318  wdataIn = addPipelineStages(b, moduleNamespace, numWriteStages, clock,
319  wdataIn, "RW" + Twine(i) + "_wdata");
320  if (isMasked)
321  wmaskBits = addPipelineStages(b, moduleNamespace, numWriteStages, clock,
322  wmaskBits, "RW" + Twine(i) + "_wmask");
323 
324  SmallVector<Value, 4> maskValues(maskBits);
325  SmallVector<Value, 4> dataValues(maskBits);
326  // For multi-bit mask, extract corresponding write data bits of
327  // mask-granularity size each. Each of the extracted data bits will be
328  // written to a register, gaurded by the corresponding mask bit.
329  for (size_t i = 0; i < maskBits; ++i) {
330  maskValues[i] = b.createOrFold<comb::ExtractOp>(wmaskBits, i, 1);
331  dataValues[i] = b.createOrFold<comb::ExtractOp>(wdataIn, i * mem.maskGran,
332  mem.maskGran);
333  }
334 
335  // wire to store read result
336  auto rWire = b.create<sv::WireOp>(wdataIn.getType());
337  Value rdata = b.create<sv::ReadInOutOp>(rWire);
338 
339  // Read logic.
340  Value rcond = b.createOrFold<comb::AndOp>(
341  readEn,
342  b.createOrFold<comb::ICmpOp>(
343  comb::ICmpPredicate::eq, readWMode,
344  b.createOrFold<ConstantOp>(readWMode.getType(), 0), false),
345  false);
346 
347  auto val = getMemoryRead(b, reg, readAddr, addMuxPragmas);
348  if (!ignoreReadEnable) {
349  Value x = b.create<sv::ConstantXOp>(val.getType());
350  val = b.create<comb::MuxOp>(rcond, val, x, false);
351  }
352  b.create<sv::AssignOp>(rWire, val);
353 
354  // Write logic gaurded by the corresponding mask bit.
355  for (auto wmask : llvm::enumerate(maskValues)) {
356  b.create<sv::AlwaysOp>(sv::EventControl::AtPosEdge, clock, [&]() {
357  auto wcond = b.createOrFold<comb::AndOp>(
358  writeEn,
359  b.createOrFold<comb::AndOp>(wmask.value(), writeWMode, false),
360  false);
361  b.create<sv::IfOp>(wcond, [&]() {
362  Value slotReg = b.create<sv::ArrayIndexInOutOp>(reg, writeAddr);
363  b.create<sv::PAssignOp>(
364  b.createOrFold<sv::IndexedPartSelectInOutOp>(
365  slotReg,
366  b.createOrFold<ConstantOp>(b.getIntegerType(32),
367  wmask.index() * mem.maskGran),
368  mem.maskGran),
369  dataValues[wmask.index()]);
370  });
371  });
372  }
373  outputs.push_back(rdata);
374  }
375 
376  DenseMap<unsigned, Operation *> writeProcesses;
377  for (size_t i = 0; i < mem.numWritePorts; ++i) {
378  auto numStages = mem.writeLatency - 1;
379  Value addr = op.getBody().getArgument(inArg++);
380  Value en = op.getBody().getArgument(inArg++);
381  Value clock = op.getBody().getArgument(inArg++);
382  Value wdataIn = op.getBody().getArgument(inArg++);
383  Value wmaskBits;
384  // There are no input mask ports, if maskBits =1. Create a dummy true value
385  // for mask.
386  if (isMasked)
387  wmaskBits = op.getBody().getArgument(inArg++);
388  else
389  wmaskBits = b.create<ConstantOp>(b.getIntegerAttr(en.getType(), 1));
390  // Add pipeline stages
391  addr = addPipelineStages(b, moduleNamespace, numStages, clock, addr,
392  "W" + Twine(i) + "addr");
393  en = addPipelineStages(b, moduleNamespace, numStages, clock, en,
394  "W" + Twine(i) + "en");
395  wdataIn = addPipelineStages(b, moduleNamespace, numStages, clock, wdataIn,
396  "W" + Twine(i) + "data");
397  if (isMasked)
398  wmaskBits = addPipelineStages(b, moduleNamespace, numStages, clock,
399  wmaskBits, "W" + Twine(i) + "mask");
400 
401  SmallVector<Value, 4> maskValues(maskBits);
402  SmallVector<Value, 4> dataValues(maskBits);
403  // For multi-bit mask, extract corresponding write data bits of
404  // mask-granularity size each. Each of the extracted data bits will be
405  // written to a register, gaurded by the corresponding mask bit.
406  for (size_t i = 0; i < maskBits; ++i) {
407  maskValues[i] = b.createOrFold<comb::ExtractOp>(wmaskBits, i, 1);
408  dataValues[i] = b.createOrFold<comb::ExtractOp>(wdataIn, i * mem.maskGran,
409  mem.maskGran);
410  }
411  // Build write port logic.
412  auto writeLogic = [&] {
413  // For each register, create the connections to write the corresponding
414  // data into it.
415  for (auto wmask : llvm::enumerate(maskValues)) {
416  // Guard by corresponding mask bit.
417  auto wcond = b.createOrFold<comb::AndOp>(en, wmask.value(), false);
418  b.create<sv::IfOp>(wcond, [&]() {
419  auto slot = b.create<sv::ArrayIndexInOutOp>(reg, addr);
420  b.create<sv::PAssignOp>(
421  b.createOrFold<sv::IndexedPartSelectInOutOp>(
422  slot,
423  b.createOrFold<ConstantOp>(b.getIntegerType(32),
424  wmask.index() * mem.maskGran),
425  mem.maskGran),
426  dataValues[wmask.index()]);
427  });
428  }
429  };
430 
431  // Build a new always block with write port logic.
432  auto alwaysBlock = [&] {
433  return b.create<sv::AlwaysOp>(sv::EventControl::AtPosEdge, clock,
434  [&]() { writeLogic(); });
435  };
436 
437  switch (mem.writeUnderWrite) {
438  // Undefined write order: lower each write port into a separate always
439  // block.
440  case seq::WUW::Undefined:
441  alwaysBlock();
442  break;
443  // Port-ordered write order: lower each write port into an always block
444  // based on its clock ID.
445  case seq::WUW::PortOrder:
446  if (auto *existingAlwaysBlock =
447  writeProcesses.lookup(mem.writeClockIDs[i])) {
448  OpBuilder::InsertionGuard guard(b);
449  b.setInsertionPointToEnd(
450  cast<sv::AlwaysOp>(existingAlwaysBlock).getBodyBlock());
451  writeLogic();
452  } else {
453  writeProcesses[i] = alwaysBlock();
454  }
455  }
456  }
457 
458  auto *outputOp = op.getBodyBlock()->getTerminator();
459  outputOp->setOperands(outputs);
460 
461  // Add logic to initialize the memory based on a file emission request. This
462  // disables randomization.
463  if (!mem.initFilename.empty()) {
464  // Set an inner symbol on the register if one does not exist.
465  if (!reg.getInnerSymAttr())
466  reg.setInnerSymAttr(hw::InnerSymAttr::get(
467  b.getStringAttr(moduleNamespace.newName(reg.getName()))));
468 
469  if (mem.initIsInline) {
470  b.create<sv::IfDefOp>("ENABLE_INITIAL_MEM_", [&]() {
471  b.create<sv::InitialOp>([&]() {
472  b.create<sv::ReadMemOp>(reg, mem.initFilename,
473  mem.initIsBinary
474  ? MemBaseTypeAttr::MemBaseBin
475  : MemBaseTypeAttr::MemBaseHex);
476  });
477  });
478  } else {
479  OpBuilder::InsertionGuard guard(b);
480 
481  // Create a new module with the readmem op.
482  b.setInsertionPointAfter(op);
483  auto boundModule = b.create<HWModuleOp>(
484  b.getStringAttr(mlirModuleNamespace.newName(op.getName() + "_init")),
485  ArrayRef<PortInfo>());
486 
487  auto filename = op->getAttrOfType<OutputFileAttr>("output_file");
488  if (filename) {
489  if (!filename.isDirectory()) {
490  SmallString<128> dir(filename.getFilename().getValue());
491  llvm::sys::path::remove_filename(dir);
492  filename = hw::OutputFileAttr::getFromDirectoryAndFilename(
493  b.getContext(), dir, boundModule.getName() + ".sv");
494  }
495  } else {
496  filename = hw::OutputFileAttr::getFromFilename(
497  b.getContext(), boundModule.getName() + ".sv");
498  }
499 
500  // Build the hierpathop
501  auto path = b.create<hw::HierPathOp>(
502  mlirModuleNamespace.newName(op.getName() + "_path"),
503  b.getArrayAttr(
504  ::InnerRefAttr::get(op.getNameAttr(), reg.getInnerNameAttr())));
505 
506  boundModule->setAttr("output_file", filename);
507  b.setInsertionPointToStart(op.getBodyBlock());
508  b.setInsertionPointToStart(boundModule.getBodyBlock());
509  b.create<sv::InitialOp>([&]() {
510  auto xmr = b.create<sv::XMRRefOp>(reg.getType(), path.getSymNameAttr());
511  b.create<sv::ReadMemOp>(xmr, mem.initFilename,
512  mem.initIsBinary ? MemBaseTypeAttr::MemBaseBin
513  : MemBaseTypeAttr::MemBaseHex);
514  });
515 
516  // Instantiate this new module inside the memory module.
517  b.setInsertionPointAfter(reg);
518  auto boundInstance = b.create<hw::InstanceOp>(
519  boundModule, boundModule.getName(), ArrayRef<Value>());
520  boundInstance->setAttr(
521  "inner_sym",
522  hw::InnerSymAttr::get(b.getStringAttr(
523  moduleNamespace.newName(boundInstance.getInstanceName()))));
524  boundInstance->setAttr("doNotPrint", b.getBoolAttr(true));
525 
526  // Bind the new module.
527  b.setInsertionPointAfter(boundModule);
528  auto bind = b.create<sv::BindOp>(hw::InnerRefAttr::get(
529  op.getNameAttr(), boundInstance.getInnerSymAttr().getSymName()));
530  bind->setAttr("output_file", filename);
531  }
532  }
533 
534  // Add logic to initialize the memory and any internal registers to random
535  // values.
536  if (disableMemRandomization && disableRegRandomization)
537  return;
538 
539  constexpr unsigned randomWidth = 32;
540  b.create<sv::IfDefOp>("ENABLE_INITIAL_MEM_", [&]() {
541  sv::RegOp randReg;
542  SmallVector<sv::RegOp> randRegs;
543  if (!disableRegRandomization) {
544  b.create<sv::IfDefOp>("RANDOMIZE_REG_INIT", [&]() {
545  signed totalWidth = 0;
546  for (sv::RegOp &reg : registers)
547  totalWidth += reg.getElementType().getIntOrFloatBitWidth();
548  while (totalWidth > 0) {
549  auto name = b.getStringAttr(moduleNamespace.newName("_RANDOM"));
550  auto innerSym = hw::InnerSymAttr::get(name);
551  randRegs.push_back(b.create<sv::RegOp>(b.getIntegerType(randomWidth),
552  name, innerSym));
553  totalWidth -= randomWidth;
554  }
555  });
556  }
557  auto randomMemReg = b.create<sv::RegOp>(
558  b.getIntegerType(llvm::divideCeil(mem.dataWidth, randomWidth) *
559  randomWidth),
560  b.getStringAttr("_RANDOM_MEM"));
561  b.create<sv::InitialOp>([&]() {
562  b.create<sv::VerbatimOp>("`INIT_RANDOM_PROLOG_");
563 
564  // Memory randomization logic. The entire memory is randomized.
565  if (!disableMemRandomization) {
566  b.create<sv::IfDefProceduralOp>("RANDOMIZE_MEM_INIT", [&]() {
567  auto outerLoopIndVarType =
568  b.getIntegerType(llvm::Log2_64_Ceil(mem.depth + 1));
569  auto innerUpperBoundWidth = randomMemReg.getType()
570  .getElementType()
571  .cast<IntegerType>()
572  .getWidth();
573  auto innerLoopIndVarType =
574  b.getIntegerType(llvm::Log2_64_Ceil(innerUpperBoundWidth + 1));
575  // Construct the following nested for loops:
576  // ```
577  // for (int i = 0; i < mem.depth; i++) begin
578  // for (int j = 0; j < randomMeg.size; j += 32)
579  // randomMem[j+31:j] = `RANDOM
580  // Memory[i] = randomMem[mem.dataWidth - 1: 0];
581  // ```
582  b.create<sv::ForOp>(
583  0, mem.depth, 1, outerLoopIndVarType, "i",
584  [&](BlockArgument outerIndVar) {
585  b.create<sv::ForOp>(
586  0, innerUpperBoundWidth, randomWidth, innerLoopIndVarType,
587  "j", [&](BlockArgument innerIndVar) {
588  auto rhs = b.create<sv::MacroRefExprSEOp>(
589  b.getIntegerType(randomWidth), "RANDOM");
590  auto lhs = b.create<sv::IndexedPartSelectInOutOp>(
591  randomMemReg, innerIndVar, randomWidth, false);
592  b.create<sv::BPAssignOp>(lhs, rhs);
593  });
594 
595  Value iterValue = outerIndVar;
596  // Truncate the induction variable if necessary.
597  if (!outerIndVar.getType().isInteger(
598  llvm::Log2_64_Ceil(mem.depth)))
599  iterValue = b.createOrFold<comb::ExtractOp>(
600  iterValue, 0, llvm::Log2_64_Ceil(mem.depth));
601  auto lhs = b.create<sv::ArrayIndexInOutOp>(reg, iterValue);
602  auto rhs = b.createOrFold<comb::ExtractOp>(
603  b.create<sv::ReadInOutOp>(randomMemReg), 0, mem.dataWidth);
604  b.create<sv::BPAssignOp>(lhs, rhs);
605  });
606  });
607  }
608 
609  // Register randomization logic. Randomize every register to a random
610  // making efficient use of available randomization registers.
611  //
612  // TODO: This shares a lot of common logic with LowerToHW. Combine
613  // these two in a common randomization utility.
614  if (!disableRegRandomization) {
615  b.create<sv::IfDefProceduralOp>("RANDOMIZE_REG_INIT", [&]() {
616  unsigned bits = randomWidth;
617  for (sv::RegOp &reg : randRegs)
618  b.create<sv::VerbatimOp>(
619  b.getStringAttr("{{0}} = {`RANDOM};"), ValueRange{},
620  b.getArrayAttr(hw::InnerRefAttr::get(op.getNameAttr(),
621  reg.getInnerNameAttr())));
622  auto randRegIdx = 0;
623  for (sv::RegOp &reg : registers) {
624  SmallVector<std::pair<Attribute, std::pair<size_t, size_t>>> values;
625  auto width = reg.getElementType().getIntOrFloatBitWidth();
626  auto widthRemaining = width;
627  while (widthRemaining > 0) {
628  if (bits == randomWidth) {
629  randReg = randRegs[randRegIdx++];
630  bits = 0;
631  }
632  auto innerRef = hw::InnerRefAttr::get(op.getNameAttr(),
633  randReg.getInnerNameAttr());
634  if (widthRemaining <= randomWidth - bits) {
635  values.push_back({innerRef, {bits + widthRemaining - 1, bits}});
636  bits += widthRemaining;
637  widthRemaining = 0;
638  continue;
639  }
640  values.push_back({innerRef, {randomWidth - 1, bits}});
641  widthRemaining -= (randomWidth - bits);
642  bits = randomWidth;
643  }
644  SmallString<32> rhs("{{0}} = ");
645  unsigned idx = 1;
646  assert(reg.getInnerSymAttr());
647  SmallVector<Attribute, 4> symbols({hw::InnerRefAttr::get(
648  op.getNameAttr(), reg.getInnerNameAttr())});
649  if (values.size() > 1)
650  rhs.append("{");
651  for (auto &v : values) {
652  if (idx > 1)
653  rhs.append(", ");
654  auto [sym, range] = v;
655  symbols.push_back(sym);
656  rhs.append(("{{" + Twine(idx++) + "}}").str());
657  // Do not emit a part select as the whole value is used.
658  if (range.first == randomWidth - 1 && range.second == 0)
659  continue;
660  // Emit a single bit part select, e.g., "[3]"
661  if (range.first == range.second) {
662  rhs.append(("[" + Twine(range.first) + "]").str());
663  continue;
664  }
665  // Emit a part select, e.g., "[4:2]"
666  rhs.append(
667  ("[" + Twine(range.first) + ":" + Twine(range.second) + "]")
668  .str());
669  }
670  if (values.size() > 1)
671  rhs.append("}");
672  rhs.append(";");
673  b.create<sv::VerbatimOp>(rhs, ValueRange{},
674  b.getArrayAttr(symbols));
675  }
676  });
677  }
678  });
679  });
680 }
681 
682 void HWMemSimImplPass::runOnOperation() {
683  auto topModule = getOperation();
684 
685  // Populate a namespace from the symbols visible to the top-level MLIR module.
686  // Memories with initializations create modules and these need to be legal
687  // symbols.
688  SymbolCache symbolCache;
689  symbolCache.addDefinitions(topModule);
690  Namespace mlirModuleNamespace;
691  mlirModuleNamespace.add(symbolCache);
692 
693  SmallVector<HWModuleGeneratedOp> toErase;
694  bool anythingChanged = false;
695 
696  for (auto op :
697  llvm::make_early_inc_range(topModule.getOps<HWModuleGeneratedOp>())) {
698  auto oldModule = cast<HWModuleGeneratedOp>(op);
699  auto gen = oldModule.getGeneratorKind();
700  auto genOp = cast<HWGeneratorSchemaOp>(
701  SymbolTable::lookupSymbolIn(getOperation(), gen));
702 
703  if (genOp.getDescriptor() == "FIRRTL_Memory") {
704  FirMemory mem(oldModule);
705 
706  OpBuilder builder(oldModule);
707  auto nameAttr = builder.getStringAttr(oldModule.getName());
708 
709  // The requirements for macro replacement:
710  // 1. read latency and write latency of one.
711  // 2. undefined read-under-write behavior.
712  if (replSeqMem && ((mem.readLatency == 1 && mem.writeLatency == 1) &&
713  mem.dataWidth > 0)) {
714  builder.create<HWModuleExternOp>(oldModule.getLoc(), nameAttr,
715  oldModule.getPortList());
716  } else {
717  auto newModule = builder.create<HWModuleOp>(
718  oldModule.getLoc(), nameAttr, oldModule.getPortList());
719  if (auto outdir = oldModule->getAttr("output_file"))
720  newModule->setAttr("output_file", outdir);
721  newModule.setCommentAttr(
722  builder.getStringAttr("VCS coverage exclude_file"));
723 
724  HWMemSimImpl(ignoreReadEnable, addMuxPragmas, disableMemRandomization,
725  disableRegRandomization,
726  addVivadoRAMAddressConflictSynthesisBugWorkaround,
727  mlirModuleNamespace)
728  .generateMemory(newModule, mem);
729  }
730 
731  oldModule.erase();
732  anythingChanged = true;
733  }
734  }
735 
736  if (!anythingChanged)
737  markAllAnalysesPreserved();
738 }
739 
/// Public factory for the HW memory simulation pass; `options` mirrors the
/// lowering flags consumed by HWMemSimImpl.
std::unique_ptr<Pass>
circt::seq::createHWMemSimImplPass(const HWMemSimImplOptions &options) {
  return std::make_unique<HWMemSimImplPass>(options);
}
lowerAnnotationsNoRefTypePorts FirtoolPreserveValuesMode value
Definition: Firtool.cpp:95
assert(baseType &&"element must be base type")
int32_t width
Definition: FIRRTL.cpp:27
static bool valueDefinedBeforeOp(Value value, Operation *op)
A helper that returns true if a value definition (or block argument) is visible to another operation,...
static Value getMemoryRead(ImplicitLocOpBuilder &b, Value memory, Value addr, bool addMuxPragmas)
llvm::SmallVector< StringAttr > outputs
Builder builder
A namespace that is used to store existing names and generate new names in some scope within the IR.
Definition: Namespace.h:29
void add(SymbolCache &symCache)
SymbolCache initializer; initialize from every key that is convertible to a StringAttr in the SymbolC...
Definition: Namespace.h:47
void addDefinitions(mlir::Operation *top)
Populate the symbol cache with all symbol-defining operations within the 'top' operation.
Definition: SymCache.cpp:23
Default symbol cache implementation; stores associations between names (StringAttr's) to mlir::Operat...
Definition: SymCache.h:85
def create(low_bit, result_type, input=None)
Definition: comb.py:187
def create(dest, src)
Definition: sv.py:98
Definition: sv.py:15
def create(value)
Definition: sv.py:106
Definition: sv.py:68
Definition: sv.py:35
def create(data_type, name=None, sym_name=None)
Definition: sv.py:61
Direction get(bool isOutput)
Returns an output direction if isOutput is true, otherwise returns an input direction.
Definition: CalyxOps.cpp:53
uint64_t getWidth(Type t)
Definition: ESIPasses.cpp:34
std::unique_ptr< mlir::Pass > createHWMemSimImplPass(const HWMemSimImplOptions &options={})
circt::hw::InOutType InOutType
Definition: SVTypes.h:25
void setSVAttributes(mlir::Operation *op, mlir::ArrayAttr attrs)
Set the SV attributes of an operation.
This file defines an intermediate representation for circuits acting as an abstraction for constraint...
Definition: DebugAnalysis.h:21
Definition: hw.py:1
Definition: seq.py:1
def reg(value, clock, reset=None, reset_value=None, name=None, sym_name=None)
Definition: seq.py:20