//===- HWMemSimImpl.cpp - HW Memory Implementation Pass ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transformation pass converts generated FIRRTL memory modules to
// simulation models.
//
//===----------------------------------------------------------------------===//

#include "circt/Dialect/Comb/CombOps.h"
#include "circt/Dialect/Emit/EmitOps.h"
#include "circt/Dialect/HW/HWAttributes.h"
#include "circt/Dialect/HW/HWOps.h"
#include "circt/Dialect/HW/InnerSymbolNamespace.h"
#include "circt/Dialect/SV/SVOps.h"
#include "circt/Dialect/Seq/SeqOps.h"
#include "circt/Dialect/Seq/SeqPasses.h"
#include "circt/Support/Namespace.h"
#include "circt/Support/SymCache.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/Pass/Pass.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Path.h"

using namespace circt;
using namespace hw;
using namespace seq;

namespace circt {
namespace seq {
#define GEN_PASS_DEF_HWMEMSIMIMPL
#include "circt/Dialect/Seq/SeqPasses.h.inc"
} // namespace seq
} // namespace circt

//===----------------------------------------------------------------------===//
// HWMemSimImplPass Pass
//===----------------------------------------------------------------------===//

namespace {

class HWMemSimImpl {
  ReadEnableMode readEnableMode;
  bool addMuxPragmas;
  bool disableMemRandomization;
  bool disableRegRandomization;
  bool addVivadoRAMAddressConflictSynthesisBugWorkaround;

  SmallVector<sv::RegOp> registers;

  Value addPipelineStages(ImplicitLocOpBuilder &b,
                          hw::InnerSymbolNamespace &moduleNamespace,
                          size_t stages, Value clock, Value data,
                          const Twine &name, Value gate = {});
  sv::AlwaysOp lastPipelineAlwaysOp;

public:
  Namespace &mlirModuleNamespace;

  HWMemSimImpl(ReadEnableMode readEnableMode, bool addMuxPragmas,
               bool disableMemRandomization, bool disableRegRandomization,
               bool addVivadoRAMAddressConflictSynthesisBugWorkaround,
               Namespace &mlirModuleNamespace)
      : readEnableMode(readEnableMode), addMuxPragmas(addMuxPragmas),
        disableMemRandomization(disableMemRandomization),
        disableRegRandomization(disableRegRandomization),
        addVivadoRAMAddressConflictSynthesisBugWorkaround(
            addVivadoRAMAddressConflictSynthesisBugWorkaround),
        mlirModuleNamespace(mlirModuleNamespace) {}

  void generateMemory(HWModuleOp op, FirMemory mem);
};

struct HWMemSimImplPass : public impl::HWMemSimImplBase<HWMemSimImplPass> {
  using HWMemSimImplBase::HWMemSimImplBase;

  void runOnOperation() override;
};

} // end anonymous namespace

/// A helper that returns true if a value definition (or block argument) is
/// visible to another operation, either because it's a block argument or
/// because the defining op is before that other op.
static bool valueDefinedBeforeOp(Value value, Operation *op) {
  Operation *valueOp = value.getDefiningOp();
  Block *valueBlock =
      valueOp ? valueOp->getBlock() : cast<BlockArgument>(value).getOwner();
  while (op->getBlock() && op->getBlock() != valueBlock)
    op = op->getParentOp();
  return valueBlock == op->getBlock() &&
         (!valueOp || valueOp->isBeforeInBlock(op));
}

//
// Construct memory read annotated with mux pragmas in the following
// form:
// ```
//   wire GEN;
//   /* synopsys infer_mux_override */
//   assign GEN = memory[addr] /* cadence map_to_mux */;
// ```
// If `addMuxPragmas` is not enabled, just return the read value without
// annotations.
static Value getMemoryRead(ImplicitLocOpBuilder &b, Value memory, Value addr,
                           bool addMuxPragmas) {
  auto slot =
      b.create<sv::ReadInOutOp>(b.create<sv::ArrayIndexInOutOp>(memory, addr));
  // If we don't want to add mux pragmas, just return the read value.
  if (!addMuxPragmas ||
      cast<hw::UnpackedArrayType>(
          cast<hw::InOutType>(memory.getType()).getElementType())
              .getNumElements() <= 1)
    return slot;
  sv::setSVAttributes(
      slot, sv::SVAttributeAttr::get(b.getContext(), "cadence map_to_mux",
                                     /*emitAsComment=*/true));
  auto valWire = b.create<sv::WireOp>(slot.getType());
  auto assignOp = b.create<sv::AssignOp>(valWire, slot);
  sv::setSVAttributes(assignOp,
                      sv::SVAttributeAttr::get(b.getContext(),
                                               "synopsys infer_mux_override",
                                               /*emitAsComment=*/true));

  return b.create<sv::ReadInOutOp>(valWire);
}

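// Note on the lowering below (illustration only, not emitted verbatim): for
// stages == 2, no gate, and name "R0_addr", addPipelineStages builds two
// registers and a single always block, roughly equivalent to:
//
//   reg [...] _R0_addr_d0;
//   reg [...] _R0_addr_d1;
//   always @(posedge clock) begin
//     _R0_addr_d0 <= <data>;
//     _R0_addr_d1 <= _R0_addr_d0;
//   end
//
// and returns the value read back from the last register. When a gate is
// given, only the first stage's assignment is wrapped in `if (gate)`.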
Value HWMemSimImpl::addPipelineStages(ImplicitLocOpBuilder &b,
                                      hw::InnerSymbolNamespace &moduleNamespace,
                                      size_t stages, Value clock, Value data,
                                      const Twine &name, Value gate) {
  if (!stages)
    return data;

  // Try to reuse the previous always block. This is only possible if the
  // clocks agree and the data and gate all dominate the always block.
  auto alwaysOp = lastPipelineAlwaysOp;
  if (alwaysOp) {
    if (alwaysOp.getClocks() != ValueRange{clock} ||
        !valueDefinedBeforeOp(data, alwaysOp) ||
        (gate && !valueDefinedBeforeOp(gate, alwaysOp)))
      alwaysOp = {};
  }
  if (!alwaysOp)
    alwaysOp = b.create<sv::AlwaysOp>(sv::EventControl::AtPosEdge, clock);

  // Add the necessary registers.
  auto savedIP = b.saveInsertionPoint();
  SmallVector<sv::RegOp> regs;
  b.setInsertionPoint(alwaysOp);
  for (unsigned i = 0; i < stages; ++i) {
    auto regName =
        b.getStringAttr(moduleNamespace.newName("_" + name + "_d" + Twine(i)));
    auto reg = b.create<sv::RegOp>(data.getType(), regName,
                                   hw::InnerSymAttr::get(regName));
    regs.push_back(reg);
    registers.push_back(reg);
  }

  // Populate the assignments in the always block.
  b.setInsertionPointToEnd(alwaysOp.getBodyBlock());
  for (unsigned i = 0; i < stages; ++i) {
    if (i > 0)
      data = b.create<sv::ReadInOutOp>(data);
    auto emitAssign = [&] { b.create<sv::PAssignOp>(regs[i], data); };
    if (gate)
      b.create<sv::IfOp>(gate, [&]() { emitAssign(); });
    else
      emitAssign();
    data = regs[i];
    gate = {};
  }
  b.restoreInsertionPoint(savedIP);
  data = b.create<sv::ReadInOutOp>(data);

  lastPipelineAlwaysOp = alwaysOp;
  return data;
}

void HWMemSimImpl::generateMemory(HWModuleOp op, FirMemory mem) {
  ImplicitLocOpBuilder b(op.getLoc(), op.getBody());

  InnerSymbolNamespace moduleNamespace(op);

  // Compute total number of mask bits.
  if (mem.maskGran == 0)
    mem.maskGran = mem.dataWidth;
  auto maskBits = mem.dataWidth / mem.maskGran;
  bool isMasked = maskBits > 1;
  // Each mask bit controls mask-granularity number of data bits.
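  // For example, dataWidth = 32 with maskGran = 8 gives maskBits = 4, so each
  // of the four mask bits guards one 8-bit slice of the data.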
  auto dataType = b.getIntegerType(mem.dataWidth);

  // Count the total number of ports.
  unsigned numPorts =
      mem.numReadPorts + mem.numWritePorts + mem.numReadWritePorts;

  // Create registers for the memory.
  sv::RegOp reg = b.create<sv::RegOp>(
      UnpackedArrayType::get(dataType, mem.depth), b.getStringAttr("Memory"));

  if (addVivadoRAMAddressConflictSynthesisBugWorkaround) {
    if (mem.readLatency == 0) {
      // If the read latency is zero, we regard the memory as write-first.
      // We add an SV attribute specifying a LUT-based RAM style so that
      // Vivado avoids a bug that miscompiles write-first memories. See the
      // "RAM address conflict and Vivado synthesis bug" issue on the Vivado
      // forum for more detail.
      sv::setSVAttributes(
          reg, sv::SVAttributeAttr::get(b.getContext(), "ram_style",
                                        R"("distributed")",
                                        /*emitAsComment=*/false));
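      // As a sketch, the attribute above makes the emitted register
      // declaration come out roughly as:
      //   (* ram_style = "distributed" *) reg [dataWidth-1:0] Memory[0:depth-1];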
    } else if (mem.readLatency == 1 && numPorts > 1) {
      // If the read address is registered and the RAM has multiple ports,
      // force write-first behaviour by setting rw_addr_collision. This avoids
      // unpredictable behaviour. Downstream flows should watch for `VPL
      // 8-6430`.
      sv::setSVAttributes(
          reg, sv::SVAttributeAttr::get(b.getContext(), "rw_addr_collision",
                                        R"("yes")", /*emitAsComment=*/false));
    }
  }

  SmallVector<Value, 4> outputs;

  size_t inArg = 0;
  for (size_t i = 0; i < mem.numReadPorts; ++i) {
    Value addr = op.getBody().getArgument(inArg++);
    Value en = op.getBody().getArgument(inArg++);
    Value clock = op.getBody().getArgument(inArg++);
    // Add pipeline stages.
    if (readEnableMode == ReadEnableMode::Ignore) {
      for (size_t j = 0, e = mem.readLatency; j != e; ++j) {
        auto enLast = en;
        if (j < e - 1)
          en = addPipelineStages(b, moduleNamespace, 1, clock, en,
                                 "R" + Twine(i) + "_en");
        addr = addPipelineStages(b, moduleNamespace, 1, clock, addr,
                                 "R" + Twine(i) + "_addr", enLast);
      }
    } else {
      en = addPipelineStages(b, moduleNamespace, mem.readLatency, clock, en,
                             "R" + Twine(i) + "_en");
      addr = addPipelineStages(b, moduleNamespace, mem.readLatency, clock, addr,
                               "R" + Twine(i) + "_addr");
    }

    // Read Logic
    Value rdata = getMemoryRead(b, reg, addr, addMuxPragmas);
    switch (readEnableMode) {
    case ReadEnableMode::Undefined: {
      Value x = b.create<sv::ConstantXOp>(rdata.getType());
      rdata = b.create<comb::MuxOp>(en, rdata, x, false);
      break;
    }
    case ReadEnableMode::Zero: {
      Value x = b.create<hw::ConstantOp>(rdata.getType(), 0);
      rdata = b.create<comb::MuxOp>(en, rdata, x, false);
      break;
    }
    case ReadEnableMode::Ignore:
      break;
    }
    outputs.push_back(rdata);
  }

  for (size_t i = 0; i < mem.numReadWritePorts; ++i) {
    auto numReadStages = mem.readLatency;
    auto numWriteStages = mem.writeLatency - 1;
    auto numCommonStages = std::min(numReadStages, numWriteStages);
    Value addr = op.getBody().getArgument(inArg++);
    Value en = op.getBody().getArgument(inArg++);
    Value clock = op.getBody().getArgument(inArg++);
    Value wmode = op.getBody().getArgument(inArg++);
    Value wdataIn = op.getBody().getArgument(inArg++);
    Value wmaskBits;
    // There are no input mask ports if maskBits == 1. Create a dummy true
    // value for the mask in that case.
    if (isMasked)
      wmaskBits = op.getBody().getArgument(inArg++);
    else
      wmaskBits = b.create<ConstantOp>(b.getIntegerAttr(en.getType(), 1));

    // Add common pipeline stages.
    addr = addPipelineStages(b, moduleNamespace, numCommonStages, clock, addr,
                             "RW" + Twine(i) + "_addr");
    en = addPipelineStages(b, moduleNamespace, numCommonStages, clock, en,
                           "RW" + Twine(i) + "_en");
    wmode = addPipelineStages(b, moduleNamespace, numCommonStages, clock, wmode,
                              "RW" + Twine(i) + "_mode");

    // Add read-only pipeline stages.
    Value readAddr = addr;
    Value readEn = en;
    if (readEnableMode == ReadEnableMode::Ignore) {
      for (size_t j = 0, e = mem.readLatency; j != e; ++j) {
        auto enLast = en;
        if (j < e - 1)
          readEn = addPipelineStages(b, moduleNamespace, 1, clock, en,
                                     "RW" + Twine(i) + "_ren");
        readAddr = addPipelineStages(b, moduleNamespace, 1, clock, addr,
                                     "RW" + Twine(i) + "_raddr", enLast);
      }
    } else {
      readAddr =
          addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
                            clock, addr, "RW" + Twine(i) + "_raddr");
      readEn =
          addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
                            clock, en, "RW" + Twine(i) + "_ren");
    }
    auto readWMode =
        addPipelineStages(b, moduleNamespace, numReadStages - numCommonStages,
                          clock, wmode, "RW" + Twine(i) + "_rmode");

    // Add write-only pipeline stages.
    auto writeAddr =
        addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
                          clock, addr, "RW" + Twine(i) + "_waddr");
    auto writeEn =
        addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
                          clock, en, "RW" + Twine(i) + "_wen");
    auto writeWMode =
        addPipelineStages(b, moduleNamespace, numWriteStages - numCommonStages,
                          clock, wmode, "RW" + Twine(i) + "_wmode");
    wdataIn = addPipelineStages(b, moduleNamespace, numWriteStages, clock,
                                wdataIn, "RW" + Twine(i) + "_wdata");
    if (isMasked)
      wmaskBits = addPipelineStages(b, moduleNamespace, numWriteStages, clock,
                                    wmaskBits, "RW" + Twine(i) + "_wmask");

    SmallVector<Value, 4> maskValues(maskBits);
    SmallVector<Value, 4> dataValues(maskBits);
    // For a multi-bit mask, extract the corresponding write data bits, one
    // mask-granularity-sized slice per mask bit. Each extracted slice will be
    // written to the register guarded by its mask bit.
    for (size_t i = 0; i < maskBits; ++i) {
      maskValues[i] = b.createOrFold<comb::ExtractOp>(wmaskBits, i, 1);
      dataValues[i] = b.createOrFold<comb::ExtractOp>(wdataIn, i * mem.maskGran,
                                                      mem.maskGran);
    }

    // Wire to store the read result.
    auto rWire = b.create<sv::WireOp>(wdataIn.getType());
    Value rdata = b.create<sv::ReadInOutOp>(rWire);

    // Read logic.
    Value rcond = b.createOrFold<comb::AndOp>(
        readEn,
        b.createOrFold<comb::ICmpOp>(
            comb::ICmpPredicate::eq, readWMode,
            b.createOrFold<ConstantOp>(readWMode.getType(), 0), false),
        false);

    auto val = getMemoryRead(b, reg, readAddr, addMuxPragmas);

    switch (readEnableMode) {
    case ReadEnableMode::Undefined: {
      Value x = b.create<sv::ConstantXOp>(val.getType());
      val = b.create<comb::MuxOp>(rcond, val, x, false);
      break;
    }
    case ReadEnableMode::Zero: {
      Value x = b.create<hw::ConstantOp>(val.getType(), 0);
      val = b.create<comb::MuxOp>(rcond, val, x, false);
      break;
    }
    case ReadEnableMode::Ignore:
      break;
    }
    b.create<sv::AssignOp>(rWire, val);

    // Write logic guarded by the corresponding mask bit.
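    // As an illustration (not emitted verbatim), for mask bit k with
    // maskGran = 8 this produces roughly:
    //   always @(posedge clock) begin
    //     if (writeEn & wmask[k] & writeWMode)
    //       Memory[writeAddr][k*8 +: 8] <= wdata[k*8 +: 8];
    //   end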
    for (auto wmask : llvm::enumerate(maskValues)) {
      b.create<sv::AlwaysOp>(sv::EventControl::AtPosEdge, clock, [&]() {
        auto wcond = b.createOrFold<comb::AndOp>(
            writeEn,
            b.createOrFold<comb::AndOp>(wmask.value(), writeWMode, false),
            false);
        b.create<sv::IfOp>(wcond, [&]() {
          Value slotReg = b.create<sv::ArrayIndexInOutOp>(reg, writeAddr);
          b.create<sv::PAssignOp>(
              b.createOrFold<sv::IndexedPartSelectInOutOp>(
                  slotReg,
                  b.createOrFold<ConstantOp>(b.getIntegerType(32),
                                             wmask.index() * mem.maskGran),
                  mem.maskGran),
              dataValues[wmask.index()]);
        });
      });
    }
    outputs.push_back(rdata);
  }

  DenseMap<unsigned, Operation *> writeProcesses;
  for (size_t i = 0; i < mem.numWritePorts; ++i) {
    auto numStages = mem.writeLatency - 1;
    Value addr = op.getBody().getArgument(inArg++);
    Value en = op.getBody().getArgument(inArg++);
    Value clock = op.getBody().getArgument(inArg++);
    Value wdataIn = op.getBody().getArgument(inArg++);
    Value wmaskBits;
    // There are no input mask ports if maskBits == 1. Create a dummy true
    // value for the mask in that case.
    if (isMasked)
      wmaskBits = op.getBody().getArgument(inArg++);
    else
      wmaskBits = b.create<ConstantOp>(b.getIntegerAttr(en.getType(), 1));
    // Add pipeline stages.
    addr = addPipelineStages(b, moduleNamespace, numStages, clock, addr,
                             "W" + Twine(i) + "addr");
    en = addPipelineStages(b, moduleNamespace, numStages, clock, en,
                           "W" + Twine(i) + "en");
    wdataIn = addPipelineStages(b, moduleNamespace, numStages, clock, wdataIn,
                                "W" + Twine(i) + "data");
    if (isMasked)
      wmaskBits = addPipelineStages(b, moduleNamespace, numStages, clock,
                                    wmaskBits, "W" + Twine(i) + "mask");

    SmallVector<Value, 4> maskValues(maskBits);
    SmallVector<Value, 4> dataValues(maskBits);
    // For a multi-bit mask, extract the corresponding write data bits, one
    // mask-granularity-sized slice per mask bit. Each extracted slice will be
    // written to the register guarded by its mask bit.
    for (size_t i = 0; i < maskBits; ++i) {
      maskValues[i] = b.createOrFold<comb::ExtractOp>(wmaskBits, i, 1);
      dataValues[i] = b.createOrFold<comb::ExtractOp>(wdataIn, i * mem.maskGran,
                                                      mem.maskGran);
    }
    // Build write port logic.
    auto writeLogic = [&] {
      // For each register, create the connections to write the corresponding
      // data into it.
      for (auto wmask : llvm::enumerate(maskValues)) {
        // Guard by corresponding mask bit.
        auto wcond = b.createOrFold<comb::AndOp>(en, wmask.value(), false);
        b.create<sv::IfOp>(wcond, [&]() {
          auto slot = b.create<sv::ArrayIndexInOutOp>(reg, addr);
          b.create<sv::PAssignOp>(
              b.createOrFold<sv::IndexedPartSelectInOutOp>(
                  slot,
                  b.createOrFold<ConstantOp>(b.getIntegerType(32),
                                             wmask.index() * mem.maskGran),
                  mem.maskGran),
              dataValues[wmask.index()]);
        });
      }
    };

    // Build a new always block with write port logic.
    auto alwaysBlock = [&] {
      return b.create<sv::AlwaysOp>(sv::EventControl::AtPosEdge, clock,
                                    [&]() { writeLogic(); });
    };

    switch (mem.writeUnderWrite) {
    // Undefined write order: lower each write port into a separate always
    // block.
    case seq::WUW::Undefined:
      alwaysBlock();
      break;
    // Port-ordered write order: lower each write port into an always block
    // based on its clock ID.
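    // (For example, two write ports that share a clock ID are merged into the
    // same always block, so their relative priority follows the port order.)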
    case seq::WUW::PortOrder:
      if (auto *existingAlwaysBlock =
              writeProcesses.lookup(mem.writeClockIDs[i])) {
        OpBuilder::InsertionGuard guard(b);
        b.setInsertionPointToEnd(
            cast<sv::AlwaysOp>(existingAlwaysBlock).getBodyBlock());
        writeLogic();
      } else {
        writeProcesses[mem.writeClockIDs[i]] = alwaysBlock();
      }
    }
  }

  auto *outputOp = op.getBodyBlock()->getTerminator();
  outputOp->setOperands(outputs);

  // Add logic to initialize the memory based on a file emission request. This
  // disables randomization.
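  // Two flavors are generated below: if the initialization is inline, an
  // `initial $readmemh/$readmemb` is placed directly in this module under
  // `ENABLE_INITIAL_MEM_`; otherwise a separate bound module is created that
  // performs the $readmem through a hierarchical reference to the memory
  // register, and that module is emitted to its own file along with its bind.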
  if (!mem.initFilename.empty()) {
    // Set an inner symbol on the register if one does not exist.
    if (!reg.getInnerSymAttr())
      reg.setInnerSymAttr(hw::InnerSymAttr::get(
          b.getStringAttr(moduleNamespace.newName(reg.getName()))));

    if (mem.initIsInline) {
      b.create<sv::IfDefOp>("ENABLE_INITIAL_MEM_", [&]() {
        b.create<sv::InitialOp>([&]() {
          b.create<sv::ReadMemOp>(reg, mem.initFilename,
                                  mem.initIsBinary
                                      ? MemBaseTypeAttr::MemBaseBin
                                      : MemBaseTypeAttr::MemBaseHex);
        });
      });
    } else {
      OpBuilder::InsertionGuard guard(b);

      // Assign a name to the bound module.
      StringAttr boundModuleName =
          b.getStringAttr(mlirModuleNamespace.newName(op.getName() + "_init"));

      // Generate a name for the file containing the bound module and the bind.
      StringAttr filename;
      if (auto fileAttr = op->getAttrOfType<OutputFileAttr>("output_file")) {
        if (!fileAttr.isDirectory()) {
          SmallString<128> path(fileAttr.getFilename().getValue());
          llvm::sys::path::remove_filename(path);
          llvm::sys::path::append(path, boundModuleName.getValue() + ".sv");
          filename = b.getStringAttr(path);
        } else {
          filename = fileAttr.getFilename();
        }
      } else {
        filename = b.getStringAttr(boundModuleName.getValue() + ".sv");
      }

      // Create a new module with the readmem op.
      b.setInsertionPointAfter(op);
      auto boundModule =
          b.create<HWModuleOp>(boundModuleName, ArrayRef<PortInfo>());

      // Build the HierPathOp that points at the memory register.
      auto path = b.create<hw::HierPathOp>(
          mlirModuleNamespace.newName(op.getName() + "_path"),
          b.getArrayAttr(
              ::InnerRefAttr::get(op.getNameAttr(), reg.getInnerNameAttr())));

      b.setInsertionPointToStart(boundModule.getBodyBlock());
      b.create<sv::InitialOp>([&]() {
        auto xmr = b.create<sv::XMRRefOp>(reg.getType(), path.getSymNameAttr());
        b.create<sv::ReadMemOp>(xmr, mem.initFilename,
                                mem.initIsBinary ? MemBaseTypeAttr::MemBaseBin
                                                 : MemBaseTypeAttr::MemBaseHex);
      });

      // Instantiate this new module inside the memory module.
      b.setInsertionPointAfter(reg);
      auto boundInstance = b.create<hw::InstanceOp>(
          boundModule, boundModule.getName(), ArrayRef<Value>());
      boundInstance->setAttr(
          "inner_sym",
          hw::InnerSymAttr::get(b.getStringAttr(
              moduleNamespace.newName(boundInstance.getInstanceName()))));
      boundInstance.setDoNotPrintAttr(b.getUnitAttr());

      // Build the file container and reference the module from it.
      b.setInsertionPointAfter(op);
      b.create<emit::FileOp>(filename, [&] {
        b.create<emit::RefOp>(FlatSymbolRefAttr::get(boundModuleName));
        b.create<sv::BindOp>(hw::InnerRefAttr::get(
            op.getNameAttr(), boundInstance.getInnerSymAttr().getSymName()));
      });
    }
  }

  // Add logic to initialize the memory and any internal registers to random
  // values.
  if (disableMemRandomization && disableRegRandomization)
    return;

  constexpr unsigned randomWidth = 32;
  b.create<sv::IfDefOp>("ENABLE_INITIAL_MEM_", [&]() {
    sv::RegOp randReg;
    SmallVector<sv::RegOp> randRegs;
    if (!disableRegRandomization) {
      b.create<sv::IfDefOp>("RANDOMIZE_REG_INIT", [&]() {
        signed totalWidth = 0;
        for (sv::RegOp &reg : registers)
          totalWidth += reg.getElementType().getIntOrFloatBitWidth();
        while (totalWidth > 0) {
          auto name = b.getStringAttr(moduleNamespace.newName("_RANDOM"));
          auto innerSym = hw::InnerSymAttr::get(name);
          randRegs.push_back(b.create<sv::RegOp>(b.getIntegerType(randomWidth),
                                                 name, innerSym));
          totalWidth -= randomWidth;
        }
      });
    }
    auto randomMemReg = b.create<sv::RegOp>(
        b.getIntegerType(llvm::divideCeil(mem.dataWidth, randomWidth) *
                         randomWidth),
        b.getStringAttr("_RANDOM_MEM"));
    b.create<sv::InitialOp>([&]() {
      b.create<sv::VerbatimOp>("`INIT_RANDOM_PROLOG_");

      // Memory randomization logic. The entire memory is randomized.
      if (!disableMemRandomization) {
        b.create<sv::IfDefProceduralOp>("RANDOMIZE_MEM_INIT", [&]() {
          auto outerLoopIndVarType =
              b.getIntegerType(llvm::Log2_64_Ceil(mem.depth + 1));
          auto innerUpperBoundWidth =
              cast<IntegerType>(randomMemReg.getType().getElementType())
                  .getWidth();
          auto innerLoopIndVarType =
              b.getIntegerType(llvm::Log2_64_Ceil(innerUpperBoundWidth + 1));
          // Construct the following nested for loops:
          // ```
          //   for (int i = 0; i < mem.depth; i++) begin
          //     for (int j = 0; j < randomMem.size; j += 32)
          //       randomMem[j+31:j] = `RANDOM
          //     Memory[i] = randomMem[mem.dataWidth - 1: 0];
          //   end
          // ```
          b.create<sv::ForOp>(
              0, mem.depth, 1, outerLoopIndVarType, "i",
              [&](BlockArgument outerIndVar) {
                b.create<sv::ForOp>(
                    0, innerUpperBoundWidth, randomWidth, innerLoopIndVarType,
                    "j", [&](BlockArgument innerIndVar) {
                      auto rhs = b.create<sv::MacroRefExprSEOp>(
                          b.getIntegerType(randomWidth), "RANDOM");
                      auto lhs = b.create<sv::IndexedPartSelectInOutOp>(
                          randomMemReg, innerIndVar, randomWidth, false);
                      b.create<sv::BPAssignOp>(lhs, rhs);
                    });

                Value iterValue = outerIndVar;
                // Truncate the induction variable if necessary.
                if (!outerIndVar.getType().isInteger(
                        llvm::Log2_64_Ceil(mem.depth)))
                  iterValue = b.createOrFold<comb::ExtractOp>(
                      iterValue, 0, llvm::Log2_64_Ceil(mem.depth));
                auto lhs = b.create<sv::ArrayIndexInOutOp>(reg, iterValue);
                auto rhs = b.createOrFold<comb::ExtractOp>(
                    b.create<sv::ReadInOutOp>(randomMemReg), 0, mem.dataWidth);
                b.create<sv::BPAssignOp>(lhs, rhs);
              });
        });
      }

      // Register randomization logic. Randomize every register to a random
      // value, making efficient use of the available randomization registers.
      //
      // TODO: This shares a lot of common logic with LowerToHW. Combine
      // these two in a common randomization utility.
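      // As a sketch (names illustrative), for a single 48-bit register `r`
      // this emits roughly:
      //   _RANDOM = {`RANDOM};
      //   _RANDOM_0 = {`RANDOM};
      //   r = {_RANDOM, _RANDOM_0[15:0]};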
      if (!disableRegRandomization) {
        b.create<sv::IfDefProceduralOp>("RANDOMIZE_REG_INIT", [&]() {
          unsigned bits = randomWidth;
          for (sv::RegOp &reg : randRegs)
            b.create<sv::VerbatimOp>(
                b.getStringAttr("{{0}} = {`RANDOM};"), ValueRange{},
                b.getArrayAttr(hw::InnerRefAttr::get(op.getNameAttr(),
                                                     reg.getInnerNameAttr())));
          auto randRegIdx = 0;
          for (sv::RegOp &reg : registers) {
            SmallVector<std::pair<Attribute, std::pair<size_t, size_t>>> values;
            auto width = reg.getElementType().getIntOrFloatBitWidth();
            auto widthRemaining = width;
            while (widthRemaining > 0) {
              if (bits == randomWidth) {
                randReg = randRegs[randRegIdx++];
                bits = 0;
              }
              auto innerRef = hw::InnerRefAttr::get(op.getNameAttr(),
                                                    randReg.getInnerNameAttr());
              if (widthRemaining <= randomWidth - bits) {
                values.push_back({innerRef, {bits + widthRemaining - 1, bits}});
                bits += widthRemaining;
                widthRemaining = 0;
                continue;
              }
              values.push_back({innerRef, {randomWidth - 1, bits}});
              widthRemaining -= (randomWidth - bits);
              bits = randomWidth;
            }
            SmallString<32> rhs("{{0}} = ");
            unsigned idx = 1;
            assert(reg.getInnerSymAttr());
            SmallVector<Attribute, 4> symbols({hw::InnerRefAttr::get(
                op.getNameAttr(), reg.getInnerNameAttr())});
            if (values.size() > 1)
              rhs.append("{");
            for (auto &v : values) {
              if (idx > 1)
                rhs.append(", ");
              auto [sym, range] = v;
              symbols.push_back(sym);
              rhs.append(("{{" + Twine(idx++) + "}}").str());
              // Do not emit a part select as the whole value is used.
              if (range.first == randomWidth - 1 && range.second == 0)
                continue;
              // Emit a single bit part select, e.g., "[3]".
              if (range.first == range.second) {
                rhs.append(("[" + Twine(range.first) + "]").str());
                continue;
              }
              // Emit a part select, e.g., "[4:2]".
              rhs.append(
                  ("[" + Twine(range.first) + ":" + Twine(range.second) + "]")
                      .str());
            }
            if (values.size() > 1)
              rhs.append("}");
            rhs.append(";");
            b.create<sv::VerbatimOp>(rhs, ValueRange{},
                                     b.getArrayAttr(symbols));
          }
        });
      }
    });
  });
}

void HWMemSimImplPass::runOnOperation() {
  auto topModule = getOperation();

  // Populate a namespace from the symbols visible to the top-level MLIR
  // module. Memories with initializations create modules and these need to be
  // legal symbols.
  SymbolCache symbolCache;
  symbolCache.addDefinitions(topModule);
  Namespace mlirModuleNamespace;
  mlirModuleNamespace.add(symbolCache);

  SmallVector<HWModuleGeneratedOp> toErase;
  bool anythingChanged = false;

  for (auto op :
       llvm::make_early_inc_range(topModule.getOps<HWModuleGeneratedOp>())) {
    auto oldModule = cast<HWModuleGeneratedOp>(op);
    auto gen = oldModule.getGeneratorKind();
    auto genOp = cast<HWGeneratorSchemaOp>(
        SymbolTable::lookupSymbolIn(getOperation(), gen));

    if (genOp.getDescriptor() == "FIRRTL_Memory") {
      FirMemory mem(oldModule);

      OpBuilder builder(oldModule);
      auto nameAttr = builder.getStringAttr(oldModule.getName());

      // The requirements for macro replacement:
      // 1. read latency and write latency of one.
      // 2. a nonzero data width.
      if (replSeqMem && ((mem.readLatency == 1 && mem.writeLatency == 1) &&
                         mem.dataWidth > 0)) {
        builder.create<HWModuleExternOp>(oldModule.getLoc(), nameAttr,
                                         oldModule.getPortList());
      } else {
        auto newModule = builder.create<HWModuleOp>(
            oldModule.getLoc(), nameAttr, oldModule.getPortList());
        if (auto outdir = oldModule->getAttr("output_file"))
          newModule->setAttr("output_file", outdir);
        newModule.setCommentAttr(
            builder.getStringAttr("VCS coverage exclude_file"));
        newModule.setPrivate();

        HWMemSimImpl(readEnableMode, addMuxPragmas, disableMemRandomization,
                     disableRegRandomization,
                     addVivadoRAMAddressConflictSynthesisBugWorkaround,
                     mlirModuleNamespace)
            .generateMemory(newModule, mem);
      }

      oldModule.erase();
      anythingChanged = true;
    }
  }

  if (!anythingChanged)
    markAllAnalysesPreserved();
}

std::unique_ptr<Pass>
circt::seq::createHWMemSimImplPass(const HWMemSimImplOptions &options) {
  return std::make_unique<HWMemSimImplPass>(options);
}