//===- HWMemSimImpl.cpp - HW Memory Implementation Pass -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transformation pass converts generated FIRRTL memory modules to
// simulation models.
//
//===----------------------------------------------------------------------===//

#include "PassDetails.h"
#include "circt/Dialect/Comb/CombOps.h"
#include "circt/Dialect/Emit/EmitOps.h"
#include "circt/Dialect/HW/HWOps.h"
#include "circt/Dialect/SV/SVAttributes.h"
#include "circt/Dialect/SV/SVOps.h"
#include "circt/Dialect/Seq/SeqPasses.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Path.h"

using namespace circt;
using namespace hw;
using namespace seq;

namespace circt {
namespace seq {
#define GEN_PASS_DEF_HWMEMSIMIMPL
#include "circt/Dialect/Seq/SeqPasses.h.inc"
} // namespace seq
} // namespace circt

//===----------------------------------------------------------------------===//
// HWMemSimImplPass Pass
//===----------------------------------------------------------------------===//

namespace {

class HWMemSimImpl {
  ReadEnableMode readEnableMode;
  bool addMuxPragmas;
  bool disableMemRandomization;
  bool disableRegRandomization;
  bool addVivadoRAMAddressConflictSynthesisBugWorkaround;

  SmallVector<sv::RegOp> registers;

  Value addPipelineStages(ImplicitLocOpBuilder &b,
                          hw::InnerSymbolNamespace &moduleNamespace,
                          size_t stages, Value clock, Value data,
                          const Twine &name, Value gate = {});
  sv::AlwaysOp lastPipelineAlwaysOp;

public:
  Namespace &mlirModuleNamespace;

  HWMemSimImpl(ReadEnableMode readEnableMode, bool addMuxPragmas,
               bool disableMemRandomization, bool disableRegRandomization,
               bool addVivadoRAMAddressConflictSynthesisBugWorkaround,
               Namespace &mlirModuleNamespace)
      : readEnableMode(readEnableMode), addMuxPragmas(addMuxPragmas),
        disableMemRandomization(disableMemRandomization),
        disableRegRandomization(disableRegRandomization),
        addVivadoRAMAddressConflictSynthesisBugWorkaround(
            addVivadoRAMAddressConflictSynthesisBugWorkaround),
        mlirModuleNamespace(mlirModuleNamespace) {}

  void generateMemory(HWModuleOp op, FirMemory mem);
};

struct HWMemSimImplPass : public impl::HWMemSimImplBase<HWMemSimImplPass> {
  using HWMemSimImplBase::HWMemSimImplBase;

  void runOnOperation() override;
};

} // end anonymous namespace

/// A helper that returns true if a value definition (or block argument) is
/// visible to another operation, either because it's a block argument or
/// because the defining op is before that other op.
static bool valueDefinedBeforeOp(Value value, Operation *op) {
  Operation *valueOp = value.getDefiningOp();
  Block *valueBlock =
      valueOp ? valueOp->getBlock() : cast<BlockArgument>(value).getOwner();
  while (op->getBlock() && op->getBlock() != valueBlock)
    op = op->getParentOp();
  return valueBlock == op->getBlock() &&
         (!valueOp || valueOp->isBeforeInBlock(op));
}

//
// Construct a memory read annotated with mux pragmas in the following form:
// ```
//   wire GEN;
//   /* synopsys infer_mux_override */
//   assign GEN = memory[addr] /* cadence map_to_mux */;
// ```
// If `addMuxPragmas` is not enabled, just return the read value without
// annotations.
static Value getMemoryRead(ImplicitLocOpBuilder &b, Value memory, Value addr,
                           bool addMuxPragmas) {
  auto slot =
      b.create<sv::ReadInOutOp>(b.create<sv::ArrayIndexInOutOp>(memory, addr));
  // If we don't want to add mux pragmas, just return the read value.
  if (!addMuxPragmas ||
      cast<hw::UnpackedArrayType>(
          cast<hw::InOutType>(memory.getType()).getElementType())
              .getNumElements() <= 1)
    return slot;
  sv::setSVAttributes(
      slot, sv::SVAttributeAttr::get(b.getContext(), "cadence map_to_mux",
                                     /*emitAsComment=*/true));
  auto valWire = b.create<sv::WireOp>(slot.getType());
  auto assignOp = b.create<sv::AssignOp>(valWire, slot);
  sv::setSVAttributes(assignOp,
                      sv::SVAttributeAttr::get(b.getContext(),
                                               "synopsys infer_mux_override",
                                               /*emitAsComment=*/true));

  return b.create<sv::ReadInOutOp>(valWire);
}

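// For reference, a gated two-stage pipeline produced by addPipelineStages on a
// read address lowers to roughly the following SystemVerilog (register and
// signal names are illustrative, not the exact generated names):
//
//   reg [aw-1:0] _R0_addr_d0, _R0_addr_d1;
//   always @(posedge clock) begin
//     if (R0_en)
//       _R0_addr_d0 <= R0_addr;
//     _R0_addr_d1 <= _R0_addr_d0;
//   end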
Value HWMemSimImpl::addPipelineStages(ImplicitLocOpBuilder &b,
                                      hw::InnerSymbolNamespace &moduleNamespace,
                                      size_t stages, Value clock, Value data,
                                      const Twine &name, Value gate) {
  if (!stages)
    return data;

  // Try to reuse the previous always block. This is only possible if the
  // clocks agree and the data and gate all dominate the always block.
  auto alwaysOp = lastPipelineAlwaysOp;
  if (alwaysOp) {
    if (alwaysOp.getClocks() != ValueRange{clock} ||
        !valueDefinedBeforeOp(data, alwaysOp) ||
        (gate && !valueDefinedBeforeOp(gate, alwaysOp)))
      alwaysOp = {};
  }
  if (!alwaysOp)
    alwaysOp = b.create<sv::AlwaysOp>(sv::EventControl::AtPosEdge, clock);

  // Add the necessary registers.
  auto savedIP = b.saveInsertionPoint();
  SmallVector<sv::RegOp> regs;
  b.setInsertionPoint(alwaysOp);
  for (unsigned i = 0; i < stages; ++i) {
    auto regName =
        b.getStringAttr(moduleNamespace.newName("_" + name + "_d" + Twine(i)));
    auto reg = b.create<sv::RegOp>(data.getType(), regName,
                                   hw::InnerSymAttr::get(regName));
    regs.push_back(reg);
    registers.push_back(reg);
  }

  // Populate the assignments in the always block.
  b.setInsertionPointToEnd(alwaysOp.getBodyBlock());
  for (unsigned i = 0; i < stages; ++i) {
    if (i > 0)
      data = b.create<sv::ReadInOutOp>(data);
    auto emitAssign = [&] { b.create<sv::PAssignOp>(regs[i], data); };
    if (gate)
      b.create<sv::IfOp>(gate, [&]() { emitAssign(); });
    else
      emitAssign();
    data = regs[i];
    gate = {};
  }
  b.restoreInsertionPoint(savedIP);
  data = b.create<sv::ReadInOutOp>(data);

  lastPipelineAlwaysOp = alwaysOp;
  return data;
}

void HWMemSimImpl::generateMemory(HWModuleOp op, FirMemory mem) {
  ImplicitLocOpBuilder b(op.getLoc(), op.getBody());

  InnerSymbolNamespace moduleNamespace(op);

  // Compute total number of mask bits.
  if (mem.maskGran == 0)
    mem.maskGran = mem.dataWidth;
  auto maskBits = mem.dataWidth / mem.maskGran;
  bool isMasked = maskBits > 1;
  // Each mask bit controls mask-granularity number of data bits.
  auto dataType = b.getIntegerType(mem.dataWidth);

  // Count the total number of ports.
  unsigned numPorts =
      mem.numReadPorts + mem.numWritePorts + mem.numReadWritePorts;

  // Create registers for the memory.
  sv::RegOp reg = b.create<sv::RegOp>(
      UnpackedArrayType::get(dataType, mem.depth), b.getStringAttr("Memory"));
  if (addVivadoRAMAddressConflictSynthesisBugWorkaround) {
    if (mem.readLatency == 0) {
      // If the read latency is zero, we regard the memory as write-first.
      // Add an SV attribute that forces a LUT ("distributed") RAM style so
      // that Vivado avoids a bug that miscompiles write-first memories. See
      // the "RAM address conflict and Vivado synthesis bug" issue on the
      // Vivado forum for more detail.
      sv::setSVAttributes(
          reg, sv::SVAttributeAttr::get(b.getContext(), "ram_style",
                                        R"("distributed")",
                                        /*emitAsComment=*/false));
    } else if (mem.readLatency == 1 && numPorts > 1) {
      // If the read address is registered and the RAM has multiple ports,
      // force write-first behaviour by setting rw_addr_collision. This avoids
      // unpredictable behaviour. Downstream flows should watch for `VPL
      // 8-6430`.
      sv::setSVAttributes(
          reg, sv::SVAttributeAttr::get(b.getContext(), "rw_addr_collision",
                                        R"("yes")", /*emitAsComment=*/false));
    }
  }

  SmallVector<Value, 4> outputs;

  size_t inArg = 0;
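  // Lower each read port. With ReadEnableMode::Undefined, the resulting read
  // path is roughly (port names illustrative):
  //
  //   assign R0_data = R0_en ? Memory[R0_addr] : {dw{1'bx}};
  //
  // ReadEnableMode::Zero substitutes zero for the unknown value, and
  // ReadEnableMode::Ignore drops the enable mux entirely.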
  for (size_t i = 0; i < mem.numReadPorts; ++i) {
    Value addr = op.getBody().getArgument(inArg++);
    Value en = op.getBody().getArgument(inArg++);
    Value clock = op.getBody().getArgument(inArg++);
    // Add pipeline stages.
    if (readEnableMode == ReadEnableMode::Ignore) {
      for (size_t j = 0, e = mem.readLatency; j != e; ++j) {
        auto enLast = en;
        if (j < e - 1)
          en = addPipelineStages(b, moduleNamespace, 1, clock, en,
                                 "R" + Twine(i) + "_en");
        addr = addPipelineStages(b, moduleNamespace, 1, clock, addr,
                                 "R" + Twine(i) + "_addr", enLast);
      }
    } else {
      en = addPipelineStages(b, moduleNamespace, mem.readLatency, clock, en,
                             "R" + Twine(i) + "_en");
      addr = addPipelineStages(b, moduleNamespace, mem.readLatency, clock, addr,
                               "R" + Twine(i) + "_addr");
    }

    // Read logic.
    Value rdata = getMemoryRead(b, reg, addr, addMuxPragmas);
    switch (readEnableMode) {
    case ReadEnableMode::Undefined: {
      Value x = b.create<sv::ConstantXOp>(rdata.getType());
      rdata = b.create<comb::MuxOp>(en, rdata, x, false);
      break;
    }
    case ReadEnableMode::Zero: {
      Value x = b.create<hw::ConstantOp>(rdata.getType(), 0);
      rdata = b.create<comb::MuxOp>(en, rdata, x, false);
      break;
    }
    case ReadEnableMode::Ignore:
      break;
    }
    outputs.push_back(rdata);
  }

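  // Lower each read-write port. The pipeline stages shared by the read and
  // write paths (the first min(readLatency, writeLatency - 1) stages) are
  // created once for the address, enable, and write-mode signals; each path
  // then adds its remaining private stages below.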
  for (size_t i = 0; i < mem.numReadWritePorts; ++i) {
    auto numReadStages = mem.readLatency;
    auto numWriteStages = mem.writeLatency - 1;
    auto numCommonStages = std::min(numReadStages, numWriteStages);
    Value addr = op.getBody().getArgument(inArg++);
    Value en = op.getBody().getArgument(inArg++);
    Value clock = op.getBody().getArgument(inArg++);
    Value wmode = op.getBody().getArgument(inArg++);
    Value wdataIn = op.getBody().getArgument(inArg++);
    Value wmaskBits;
    // There is no input mask port if maskBits == 1; create a dummy true value
    // for the mask.
    if (isMasked)
      wmaskBits = op.getBody().getArgument(inArg++);
    else
      wmaskBits = b.create<ConstantOp>(b.getIntegerAttr(en.getType(), 1));

    // Add common pipeline stages.
    addr = addPipelineStages(b, moduleNamespace, numCommonStages, clock, addr,
                             "RW" + Twine(i) + "_addr");
    en = addPipelineStages(b, moduleNamespace, numCommonStages, clock, en,
                           "RW" + Twine(i) + "_en");
    wmode = addPipelineStages(b, moduleNamespace, numCommonStages, clock, wmode,
                              "RW" + Twine(i) + "_mode");

    // Add read-only pipeline stages.
    Value readAddr = addr;
    Value readEn = en;
    if (readEnableMode == ReadEnableMode::Ignore) {
      for (size_t j = 0, e = mem.readLatency; j != e; ++j) {
        auto enLast = en;
        if (j < e - 1)
          readEn = addPipelineStages(b, moduleNamespace, 1, clock, en,
                                     "RW" + Twine(i) + "_ren");
        readAddr = addPipelineStages(b, moduleNamespace, 1, clock, addr,
                                     "RW" + Twine(i) + "_raddr", enLast);
      }
    } else {
      readAddr =
          addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
                            clock, addr, "RW" + Twine(i) + "_raddr");
      readEn =
          addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
                            clock, en, "RW" + Twine(i) + "_ren");
    }
    auto readWMode =
        addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
                          clock, wmode, "RW" + Twine(i) + "_rmode");

    // Add write-only pipeline stages.
    auto writeAddr =
        addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
                          clock, addr, "RW" + Twine(i) + "_waddr");
    auto writeEn =
        addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
                          clock, en, "RW" + Twine(i) + "_wen");
    auto writeWMode =
        addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
                          clock, wmode, "RW" + Twine(i) + "_wmode");
    wdataIn = addPipelineStages(b, moduleNamespace, numWriteStages, clock,
                                wdataIn, "RW" + Twine(i) + "_wdata");
    if (isMasked)
      wmaskBits = addPipelineStages(b, moduleNamespace, numWriteStages, clock,
                                    wmaskBits, "RW" + Twine(i) + "_wmask");

    SmallVector<Value, 4> maskValues(maskBits);
    SmallVector<Value, 4> dataValues(maskBits);
    // For a multi-bit mask, extract mask-granularity-sized slices of the write
    // data. Each extracted slice is written to the memory, guarded by its
    // corresponding mask bit.
    for (size_t i = 0; i < maskBits; ++i) {
      maskValues[i] = b.createOrFold<comb::ExtractOp>(wmaskBits, i, 1);
      dataValues[i] = b.createOrFold<comb::ExtractOp>(wdataIn, i * mem.maskGran,
                                                      mem.maskGran);
    }

    // Wire to store the read result.
    auto rWire = b.create<sv::WireOp>(wdataIn.getType());
    Value rdata = b.create<sv::ReadInOutOp>(rWire);

    // Read logic.
    Value rcond = b.createOrFold<comb::AndOp>(
        readEn,
        b.createOrFold<comb::ICmpOp>(
            comb::ICmpPredicate::eq, readWMode,
            b.createOrFold<ConstantOp>(readWMode.getType(), 0), false),
        false);

    auto val = getMemoryRead(b, reg, readAddr, addMuxPragmas);

    switch (readEnableMode) {
    case ReadEnableMode::Undefined: {
      Value x = b.create<sv::ConstantXOp>(val.getType());
      val = b.create<comb::MuxOp>(rcond, val, x, false);
      break;
    }
    case ReadEnableMode::Zero: {
      Value x = b.create<hw::ConstantOp>(val.getType(), 0);
      val = b.create<comb::MuxOp>(rcond, val, x, false);
      break;
    }
    case ReadEnableMode::Ignore:
      break;
    }
    b.create<sv::AssignOp>(rWire, val);

    // Write logic guarded by the corresponding mask bit.
    for (auto wmask : llvm::enumerate(maskValues)) {
      b.create<sv::AlwaysOp>(sv::EventControl::AtPosEdge, clock, [&]() {
        auto wcond = b.createOrFold<comb::AndOp>(
            writeEn,
            b.createOrFold<comb::AndOp>(wmask.value(), writeWMode, false),
            false);
        b.create<sv::IfOp>(wcond, [&]() {
          Value slotReg = b.create<sv::ArrayIndexInOutOp>(reg, writeAddr);
          b.create<sv::PAssignOp>(
              b.createOrFold<sv::IndexedPartSelectInOutOp>(
                  slotReg,
                  b.createOrFold<ConstantOp>(b.getIntegerType(32),
                                             wmask.index() * mem.maskGran),
                  mem.maskGran),
              dataValues[wmask.index()]);
        });
      });
    }
    outputs.push_back(rdata);
  }

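  // Lower each write port. Per mask bit, the generated logic is roughly
  // (names illustrative):
  //
  //   always @(posedge clock) begin
  //     if (W0_en & W0_mask[k])
  //       Memory[W0_addr][k*maskGran +: maskGran] <= W0_data[k*maskGran +: maskGran];
  //   end
  //
  // Under WUW::PortOrder, write ports that share a clock ID reuse a single
  // always block (tracked in `writeProcesses`) so that their assignments keep
  // the original port order.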
  DenseMap<unsigned, Operation *> writeProcesses;
  for (size_t i = 0; i < mem.numWritePorts; ++i) {
    auto numStages = mem.writeLatency - 1;
    Value addr = op.getBody().getArgument(inArg++);
    Value en = op.getBody().getArgument(inArg++);
    Value clock = op.getBody().getArgument(inArg++);
    Value wdataIn = op.getBody().getArgument(inArg++);
    Value wmaskBits;
    // There is no input mask port if maskBits == 1; create a dummy true value
    // for the mask.
    if (isMasked)
      wmaskBits = op.getBody().getArgument(inArg++);
    else
      wmaskBits = b.create<ConstantOp>(b.getIntegerAttr(en.getType(), 1));
    // Add pipeline stages.
    addr = addPipelineStages(b, moduleNamespace, numStages, clock, addr,
                             "W" + Twine(i) + "addr");
    en = addPipelineStages(b, moduleNamespace, numStages, clock, en,
                           "W" + Twine(i) + "en");
    wdataIn = addPipelineStages(b, moduleNamespace, numStages, clock, wdataIn,
                                "W" + Twine(i) + "data");
    if (isMasked)
      wmaskBits = addPipelineStages(b, moduleNamespace, numStages, clock,
                                    wmaskBits, "W" + Twine(i) + "mask");

    SmallVector<Value, 4> maskValues(maskBits);
    SmallVector<Value, 4> dataValues(maskBits);
    // For a multi-bit mask, extract mask-granularity-sized slices of the write
    // data. Each extracted slice is written to the memory, guarded by its
    // corresponding mask bit.
    for (size_t i = 0; i < maskBits; ++i) {
      maskValues[i] = b.createOrFold<comb::ExtractOp>(wmaskBits, i, 1);
      dataValues[i] = b.createOrFold<comb::ExtractOp>(wdataIn, i * mem.maskGran,
                                                      mem.maskGran);
    }
    // Build write port logic.
    auto writeLogic = [&] {
      // For each register, create the connections to write the corresponding
      // data into it.
      for (auto wmask : llvm::enumerate(maskValues)) {
        // Guard by corresponding mask bit.
        auto wcond = b.createOrFold<comb::AndOp>(en, wmask.value(), false);
        b.create<sv::IfOp>(wcond, [&]() {
          auto slot = b.create<sv::ArrayIndexInOutOp>(reg, addr);
          b.create<sv::PAssignOp>(
              b.createOrFold<sv::IndexedPartSelectInOutOp>(
                  slot,
                  b.createOrFold<ConstantOp>(b.getIntegerType(32),
                                             wmask.index() * mem.maskGran),
                  mem.maskGran),
              dataValues[wmask.index()]);
        });
      }
    };

    // Build a new always block with write port logic.
    auto alwaysBlock = [&] {
      return b.create<sv::AlwaysOp>(sv::EventControl::AtPosEdge, clock,
                                    [&]() { writeLogic(); });
    };

    switch (mem.writeUnderWrite) {
    // Undefined write order: lower each write port into a separate always
    // block.
    case seq::WUW::Undefined:
      alwaysBlock();
      break;
    // Port-ordered write order: lower each write port into an always block
    // based on its clock ID.
    case seq::WUW::PortOrder:
      if (auto *existingAlwaysBlock =
              writeProcesses.lookup(mem.writeClockIDs[i])) {
        OpBuilder::InsertionGuard guard(b);
        b.setInsertionPointToEnd(
            cast<sv::AlwaysOp>(existingAlwaysBlock).getBodyBlock());
        writeLogic();
      } else {
        writeProcesses[i] = alwaysBlock();
      }
    }
  }

  auto *outputOp = op.getBodyBlock()->getTerminator();
  outputOp->setOperands(outputs);

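  // If an initialization file was requested, the lowering below emits either
  // an inline `$readmemh`/`$readmemb` guarded by `ENABLE_INITIAL_MEM_`, or,
  // for non-inline initialization, a separate bound module placed in its own
  // output file that performs the readmem through a hierarchical reference to
  // the memory register, together with a `bind` that instantiates it here.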
  // Add logic to initialize the memory based on a file emission request. This
  // disables randomization.
  if (!mem.initFilename.empty()) {
    // Set an inner symbol on the register if one does not exist.
    if (!reg.getInnerSymAttr())
      reg.setInnerSymAttr(hw::InnerSymAttr::get(
          b.getStringAttr(moduleNamespace.newName(reg.getName()))));

    if (mem.initIsInline) {
      b.create<sv::IfDefOp>("ENABLE_INITIAL_MEM_", [&]() {
        b.create<sv::InitialOp>([&]() {
          b.create<sv::ReadMemOp>(reg, mem.initFilename,
                                  mem.initIsBinary
                                      ? MemBaseTypeAttr::MemBaseBin
                                      : MemBaseTypeAttr::MemBaseHex);
        });
      });
    } else {
      OpBuilder::InsertionGuard guard(b);

      // Assign a name to the bound module.
      StringAttr boundModuleName =
          b.getStringAttr(mlirModuleNamespace.newName(op.getName() + "_init"));

      // Generate a name for the file containing the bound module and the bind.
      StringAttr filename;
      if (auto fileAttr = op->getAttrOfType<OutputFileAttr>("output_file")) {
        if (!fileAttr.isDirectory()) {
          SmallString<128> path(fileAttr.getFilename().getValue());
          llvm::sys::path::remove_filename(path);
          llvm::sys::path::append(path, boundModuleName.getValue() + ".sv");
          filename = b.getStringAttr(path);
        } else {
          filename = fileAttr.getFilename();
        }
      } else {
        filename = b.getStringAttr(boundModuleName.getValue() + ".sv");
      }

      // Create a new module with the readmem op.
      b.setInsertionPointAfter(op);
      auto boundModule =
          b.create<HWModuleOp>(boundModuleName, ArrayRef<PortInfo>());

      // Build the HierPathOp pointing at the memory register.
      auto path = b.create<hw::HierPathOp>(
          mlirModuleNamespace.newName(op.getName() + "_path"),
          b.getArrayAttr(
              hw::InnerRefAttr::get(op.getNameAttr(), reg.getInnerNameAttr())));

      b.setInsertionPointToStart(boundModule.getBodyBlock());
      b.create<sv::InitialOp>([&]() {
        auto xmr = b.create<sv::XMRRefOp>(reg.getType(), path.getSymNameAttr());
        b.create<sv::ReadMemOp>(xmr, mem.initFilename,
                                mem.initIsBinary ? MemBaseTypeAttr::MemBaseBin
                                                 : MemBaseTypeAttr::MemBaseHex);
      });

      // Instantiate this new module inside the memory module.
      b.setInsertionPointAfter(reg);
      auto boundInstance = b.create<hw::InstanceOp>(
          boundModule, boundModule.getName(), ArrayRef<Value>());
      boundInstance->setAttr(
          "inner_sym",
          hw::InnerSymAttr::get(b.getStringAttr(
              moduleNamespace.newName(boundInstance.getInstanceName()))));
      boundInstance->setAttr("doNotPrint", b.getBoolAttr(true));

      // Build the file container and reference the module from it.
      b.setInsertionPointAfter(op);
      b.create<emit::FileOp>(filename, [&] {
        b.create<emit::RefOp>(FlatSymbolRefAttr::get(boundModuleName));
        b.create<sv::BindOp>(hw::InnerRefAttr::get(
            op.getNameAttr(), boundInstance.getInnerSymAttr().getSymName()));
      });
    }
  }

  // Add logic to initialize the memory and any internal registers to random
  // values.
  if (disableMemRandomization && disableRegRandomization)
    return;

  constexpr unsigned randomWidth = 32;
  b.create<sv::IfDefOp>("ENABLE_INITIAL_MEM_", [&]() {
    sv::RegOp randReg;
    SmallVector<sv::RegOp> randRegs;
    if (!disableRegRandomization) {
      b.create<sv::IfDefOp>("RANDOMIZE_REG_INIT", [&]() {
        signed totalWidth = 0;
        for (sv::RegOp &reg : registers)
          totalWidth += reg.getElementType().getIntOrFloatBitWidth();
        while (totalWidth > 0) {
          auto name = b.getStringAttr(moduleNamespace.newName("_RANDOM"));
          auto innerSym = hw::InnerSymAttr::get(name);
          randRegs.push_back(b.create<sv::RegOp>(b.getIntegerType(randomWidth),
                                                 name, innerSym));
          totalWidth -= randomWidth;
        }
      });
    }
    auto randomMemReg = b.create<sv::RegOp>(
        b.getIntegerType(llvm::divideCeil(mem.dataWidth, randomWidth) *
                         randomWidth),
        b.getStringAttr("_RANDOM_MEM"));
    b.create<sv::InitialOp>([&]() {
      b.create<sv::VerbatimOp>("`INIT_RANDOM_PROLOG_");

      // Memory randomization logic. The entire memory is randomized.
      if (!disableMemRandomization) {
        b.create<sv::IfDefProceduralOp>("RANDOMIZE_MEM_INIT", [&]() {
          auto outerLoopIndVarType =
              b.getIntegerType(llvm::Log2_64_Ceil(mem.depth + 1));
          auto innerUpperBoundWidth =
              cast<IntegerType>(randomMemReg.getType().getElementType())
                  .getWidth();
          auto innerLoopIndVarType =
              b.getIntegerType(llvm::Log2_64_Ceil(innerUpperBoundWidth + 1));
          // Construct the following nested for loops:
          // ```
          //   for (int i = 0; i < mem.depth; i++) begin
          //     for (int j = 0; j < randomMem.size; j += 32)
          //       randomMem[j+31:j] = `RANDOM
          //     Memory[i] = randomMem[mem.dataWidth - 1: 0];
          //   end
          // ```
          b.create<sv::ForOp>(
              0, mem.depth, 1, outerLoopIndVarType, "i",
              [&](BlockArgument outerIndVar) {
                b.create<sv::ForOp>(
                    0, innerUpperBoundWidth, randomWidth, innerLoopIndVarType,
                    "j", [&](BlockArgument innerIndVar) {
                      auto rhs = b.create<sv::MacroRefExprSEOp>(
                          b.getIntegerType(randomWidth), "RANDOM");
                      auto lhs = b.create<sv::IndexedPartSelectInOutOp>(
                          randomMemReg, innerIndVar, randomWidth, false);
                      b.create<sv::BPAssignOp>(lhs, rhs);
                    });

                Value iterValue = outerIndVar;
                // Truncate the induction variable if necessary.
                if (!outerIndVar.getType().isInteger(
                        llvm::Log2_64_Ceil(mem.depth)))
                  iterValue = b.createOrFold<comb::ExtractOp>(
                      iterValue, 0, llvm::Log2_64_Ceil(mem.depth));
                auto lhs = b.create<sv::ArrayIndexInOutOp>(reg, iterValue);
                auto rhs = b.createOrFold<comb::ExtractOp>(
                    b.create<sv::ReadInOutOp>(randomMemReg), 0, mem.dataWidth);
                b.create<sv::BPAssignOp>(lhs, rhs);
              });
        });
      }

      // Register randomization logic. Randomize every register to a random
      // value, making efficient use of the available randomization registers.
      //
      // TODO: This shares a lot of common logic with LowerToHW. Combine
      // these two in a common randomization utility.
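      //
      // For example, a 40-bit pipeline register packed across two 32-bit
      // `_RANDOM` words is assigned roughly as (names illustrative):
      //
      //   _W0data_d0 = {_RANDOM_0, _RANDOM_1[7:0]};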
      if (!disableRegRandomization) {
        b.create<sv::IfDefProceduralOp>("RANDOMIZE_REG_INIT", [&]() {
          unsigned bits = randomWidth;
          for (sv::RegOp &reg : randRegs)
            b.create<sv::VerbatimOp>(
                b.getStringAttr("{{0}} = {`RANDOM};"), ValueRange{},
                b.getArrayAttr(hw::InnerRefAttr::get(op.getNameAttr(),
                                                     reg.getInnerNameAttr())));
          auto randRegIdx = 0;
          for (sv::RegOp &reg : registers) {
            SmallVector<std::pair<Attribute, std::pair<size_t, size_t>>> values;
            auto width = reg.getElementType().getIntOrFloatBitWidth();
            auto widthRemaining = width;
            while (widthRemaining > 0) {
              if (bits == randomWidth) {
                randReg = randRegs[randRegIdx++];
                bits = 0;
              }
              auto innerRef = hw::InnerRefAttr::get(op.getNameAttr(),
                                                    randReg.getInnerNameAttr());
              if (widthRemaining <= randomWidth - bits) {
                values.push_back({innerRef, {bits + widthRemaining - 1, bits}});
                bits += widthRemaining;
                widthRemaining = 0;
                continue;
              }
              values.push_back({innerRef, {randomWidth - 1, bits}});
              widthRemaining -= (randomWidth - bits);
              bits = randomWidth;
            }
            SmallString<32> rhs("{{0}} = ");
            unsigned idx = 1;
            assert(reg.getInnerSymAttr());
            SmallVector<Attribute, 4> symbols({hw::InnerRefAttr::get(
                op.getNameAttr(), reg.getInnerNameAttr())});
            if (values.size() > 1)
              rhs.append("{");
            for (auto &v : values) {
              if (idx > 1)
                rhs.append(", ");
              auto [sym, range] = v;
              symbols.push_back(sym);
              rhs.append(("{{" + Twine(idx++) + "}}").str());
              // Do not emit a part select as the whole value is used.
              if (range.first == randomWidth - 1 && range.second == 0)
                continue;
              // Emit a single bit part select, e.g., "[3]".
              if (range.first == range.second) {
                rhs.append(("[" + Twine(range.first) + "]").str());
                continue;
              }
              // Emit a part select, e.g., "[4:2]".
              rhs.append(
                  ("[" + Twine(range.first) + ":" + Twine(range.second) + "]")
                      .str());
            }
            if (values.size() > 1)
              rhs.append("}");
            rhs.append(";");
            b.create<sv::VerbatimOp>(rhs, ValueRange{},
                                     b.getArrayAttr(symbols));
          }
        });
      }
    });
  });
}

void HWMemSimImplPass::runOnOperation() {
  auto topModule = getOperation();

  // Populate a namespace from the symbols visible to the top-level MLIR
  // module. Memories with initializations create modules and these need to be
  // legal symbols.
  SymbolCache symbolCache;
  symbolCache.addDefinitions(topModule);
  Namespace mlirModuleNamespace;
  mlirModuleNamespace.add(symbolCache);

  SmallVector<HWModuleGeneratedOp> toErase;
  bool anythingChanged = false;

  for (auto op :
       llvm::make_early_inc_range(topModule.getOps<HWModuleGeneratedOp>())) {
    auto oldModule = cast<HWModuleGeneratedOp>(op);
    auto gen = oldModule.getGeneratorKind();
    auto genOp = cast<HWGeneratorSchemaOp>(
        SymbolTable::lookupSymbolIn(getOperation(), gen));

    if (genOp.getDescriptor() == "FIRRTL_Memory") {
      FirMemory mem(oldModule);

      OpBuilder builder(oldModule);
      auto nameAttr = builder.getStringAttr(oldModule.getName());

      // The requirements for macro replacement:
      // 1. read latency and write latency of one.
      // 2. undefined read-under-write behavior.
      if (replSeqMem && ((mem.readLatency == 1 && mem.writeLatency == 1) &&
                         mem.dataWidth > 0)) {
        builder.create<HWModuleExternOp>(oldModule.getLoc(), nameAttr,
                                         oldModule.getPortList());
      } else {
        auto newModule = builder.create<HWModuleOp>(
            oldModule.getLoc(), nameAttr, oldModule.getPortList());
        if (auto outdir = oldModule->getAttr("output_file"))
          newModule->setAttr("output_file", outdir);
        newModule.setCommentAttr(
            builder.getStringAttr("VCS coverage exclude_file"));
        newModule.setPrivate();

        HWMemSimImpl(readEnableMode, addMuxPragmas, disableMemRandomization,
                     disableRegRandomization,
                     addVivadoRAMAddressConflictSynthesisBugWorkaround,
                     mlirModuleNamespace)
            .generateMemory(newModule, mem);
      }

      oldModule.erase();
      anythingChanged = true;
    }
  }

  if (!anythingChanged)
    markAllAnalysesPreserved();
}

std::unique_ptr<Pass>
circt::seq::createHWMemSimImplPass(const HWMemSimImplOptions &options) {
  return std::make_unique<HWMemSimImplPass>(options);
}