CIRCT 23.0.0git
Loading...
Searching...
No Matches
ResourceUsageAnalysis.cpp
Go to the documentation of this file.
1//===- ResourceUsageAnalysis.cpp - Resource Usage Analysis ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the resource usage analysis for the Synth dialect.
10// The analysis computes resource utilization including and-inverter gates,
11// DFF bits, and LUTs across module hierarchies.
12//
13//===----------------------------------------------------------------------===//
14
23#include "circt/Support/LLVM.h"
24#include "mlir/IR/BuiltinOps.h"
25#include "mlir/Pass/AnalysisManager.h"
26#include "mlir/Support/FileUtilities.h"
27#include "llvm/ADT/ScopeExit.h"
28#include "llvm/Support/JSON.h"
29#include "llvm/Support/ToolOutputFile.h"
30
31namespace circt {
32namespace synth {
33#define GEN_PASS_DEF_PRINTRESOURCEUSAGEANALYSIS
34#include "circt/Dialect/Synth/Transforms/SynthPasses.h.inc"
35} // namespace synth
36} // namespace circt
37
38using namespace circt;
39using namespace synth;
40
41//===----------------------------------------------------------------------===//
42// ResourceUsageAnalysis Implementation
43//===----------------------------------------------------------------------===//
44
45/// Accumulate resource counts for an operation if it's a tracked resource type.
46/// Returns true if the operation was tracked, false otherwise.
47static bool accumulateResourceCounts(Operation *op,
48 llvm::StringMap<uint64_t> &counts) {
49 if (op->getNumResults() != 1 || !op->getResult(0).getType().isInteger())
50 return false;
51 return TypeSwitch<Operation *, bool>(op)
52 .Case<BooleanLogicOpInterface>([&](auto logicOp) {
53 if (auto areaCost = logicOp.getLogicAreaCost()) {
54 counts[op->getName().getStringRef()] += *areaCost;
55 return true;
56 }
57 return false;
58 })
59 // Variadic comb logic operations.
60 // Gate count = (num_inputs - 1) * bitwidth
61 .Case<comb::AndOp, comb::OrOp, comb::XorOp>([&](auto logicOp) {
62 counts[logicOp->getName().getStringRef()] +=
63 static_cast<uint64_t>(logicOp.getNumOperands() - 1) *
64 logicOp.getType().getIntOrFloatBitWidth();
65 return true;
66 })
67 // Truth tables (LUTs) - count both the total number of truth tables and
68 // the per-input breakdown.
69 .Case<comb::TruthTableOp>([&](auto op) {
70 uint64_t count = op.getType().getIntOrFloatBitWidth();
71 counts[op->getName().getStringRef()] += count;
72 std::string bucket = (Twine(op->getName().getStringRef()) + "_" +
73 Twine(op.getNumOperands()))
74 .str();
75 counts[bucket] += count;
76 return true;
77 })
78 // Sequential elements.
79 // Count = bitwidth
80 .Case<seq::CompRegOp, seq::FirRegOp>([&](auto op) {
81 uint64_t count = op.getType().getIntOrFloatBitWidth();
82 counts[op->getName().getStringRef()] += count;
83 return true;
84 })
85 .Default([](Operation *) { return false; });
86}
87
/// Construct the analysis. Eagerly acquires the instance graph analysis so
/// that module-hierarchy traversal is available to all subsequent queries.
ResourceUsageAnalysis::ResourceUsageAnalysis(Operation *moduleOp,
                                             mlir::AnalysisManager &am)
    : instanceGraph(&am.getAnalysis<igraph::InstanceGraph>()) {}
91
/// Look up resource usage for a module by name. Returns the cached result
/// when available; otherwise resolves the name via the instance graph and
/// delegates to the module-based overload. Returns nullptr when no module
/// with this name exists in the instance graph.
ResourceUsageAnalysis::ModuleResourceUsage *
ResourceUsageAnalysis::getResourceUsage(StringAttr moduleName) {
  // Check cache first.
  auto it = designUsageCache.find(moduleName);
  if (it != designUsageCache.end())
    return it->second.get();

  // Lookup module in instance graph.
  auto *node = instanceGraph->lookup(moduleName);
  if (!node)
    return nullptr;

  return getResourceUsage(node->getModule());
}
106
/// Compute (and memoize) resource usage for `module`, recursing into every
/// instantiated child so that `total` reflects the fully elaborated design
/// beneath this module while `local` holds only this module's own ops.
ResourceUsageAnalysis::ModuleResourceUsage *
ResourceUsageAnalysis::getResourceUsage(igraph::ModuleOpInterface module) {
  // Check cache first.
  auto cacheIt = designUsageCache.find(module.getModuleNameAttr());
  if (cacheIt != designUsageCache.end())
    return cacheIt->second.get();

  // NOTE(review): `node` is dereferenced below without a null check — this
  // assumes every analyzed module is present in the instance graph; confirm.
  auto *node = instanceGraph->lookup(module.getModuleNameAttr());

  // Count local resources by walking all operations in the module.
  llvm::StringMap<uint64_t> counts;
  uint64_t unknownOpCount = 0;
  module->walk([&](Operation *op) {
    if (accumulateResourceCounts(op, counts))
      return;
    if (op->getNumResults() > 0 && !isa<hw::HWInstanceLike>(op) &&
        !op->hasTrait<mlir::OpTrait::ConstantLike>()) {
      // Count untracked value-producing ops (excluding instances and
      // constants) so the report can surface unmodeled resources.
      unknownOpCount++;
    }
  });

  // Add unknown operation count if any were found.
  if (unknownOpCount > 0)
    counts["<unknown>"] = unknownOpCount;

  // Initialize module usage with local counts.
  // Total will be updated as we process child instances.
  ResourceUsage local(std::move(counts));
  auto moduleUsage = std::make_unique<ModuleResourceUsage>(
      module.getModuleNameAttr(), local, local);

  // Recursively process child module instances.
  for (auto *child : *node) {
    auto *targetNode = child->getTarget();

    auto childModule = targetNode->getModule();

    auto *instanceOp = child->getInstance().getOperation();
    // Skip instances with no results or marked as "doNotPrint".
    if (instanceOp->getNumResults() == 0 ||
        instanceOp->hasAttrOfType<UnitAttr>("doNotPrint"))
      continue;

    // Recursively compute child usage and accumulate into total.
    auto *childUsage = getResourceUsage(childModule);
    moduleUsage->total += childUsage->total;
    moduleUsage->instances.emplace_back(
        childModule.getModuleNameAttr(),
        child->getInstance().getInstanceNameAttr(), childUsage);
  }

  // Insert into cache and return.
  auto [it, success] = designUsageCache.try_emplace(module.getModuleNameAttr(),
                                                    std::move(moduleUsage));
  assert(success && "module already exists in cache");

  return it->second.get();
}
166
167//===----------------------------------------------------------------------===//
168// JSON Serialization
169//===----------------------------------------------------------------------===//
170
171/// Convert ResourceUsage to JSON object.
172static llvm::json::Object
173getModuleResourceUsageJSON(const ResourceUsageAnalysis::ResourceUsage &usage) {
174 llvm::json::Object obj;
175 for (const auto &count : usage.getCounts())
176 obj[count.getKey()] = count.second;
177 return obj;
178}
179
180/// Convert ModuleResourceUsage to JSON object with full hierarchy.
181/// This creates fully-elaborated information including all child instances.
182static llvm::json::Object getModuleResourceUsageJSON(
183 const ResourceUsageAnalysis::ModuleResourceUsage &usage) {
184 llvm::json::Object obj;
185 obj["moduleName"] = usage.moduleName.getValue();
186 obj["local"] = getModuleResourceUsageJSON(usage.getLocal());
187 obj["total"] = getModuleResourceUsageJSON(usage.getTotal());
188
189 // Serialize child instances recursively.
190 SmallVector<llvm::json::Value> instances;
191 for (const auto &instance : usage.instances) {
192 llvm::json::Object child;
193 child["instanceName"] = instance.instanceName.getValue();
194 child["moduleName"] = instance.moduleName.getValue();
195 child["usage"] = getModuleResourceUsageJSON(*instance.usage);
196 instances.push_back(std::move(child));
197 }
198 obj["instances"] = llvm::json::Array(instances);
199
200 return obj;
201}
202
/// Emit this module's fully-elaborated resource usage (including the child
/// instance hierarchy) as JSON to `os`.
void ResourceUsageAnalysis::ModuleResourceUsage::emitJSON(
    raw_ostream &os) const {
  os << getModuleResourceUsageJSON(*this);
}
207
namespace {
/// Pass that runs ResourceUsageAnalysis and prints the results, either as
/// human-readable text or as a JSON array (one entry per top module).
struct PrintResourceUsageAnalysisPass
    : public impl::PrintResourceUsageAnalysisBase<
          PrintResourceUsageAnalysisPass> {
  using PrintResourceUsageAnalysisBase::PrintResourceUsageAnalysisBase;

  void runOnOperation() override;

  /// Determine which modules to analyze based on options.
  LogicalResult getTopModules(igraph::InstanceGraph *instanceGraph,
                              SmallVectorImpl<igraph::ModuleOpInterface> &tops);

  /// Print analysis result for a single top module. Exactly one of `os`
  /// (text mode) or `jsonOS` (JSON mode) is expected to be non-null.
  LogicalResult printAnalysisResult(ResourceUsageAnalysis &analysis,
                                    igraph::ModuleOpInterface top,
                                    llvm::raw_ostream *os,
                                    llvm::json::OStream *jsonOS);
};
} // namespace
227
/// Populate `tops` with the modules to analyze: either the user-specified
/// `topModuleName`, or the top-level modules inferred from the instance graph
/// when no name was given. Emits a diagnostic and returns failure when no
/// suitable module can be found.
LogicalResult PrintResourceUsageAnalysisPass::getTopModules(
    igraph::InstanceGraph *instanceGraph,
    SmallVectorImpl<igraph::ModuleOpInterface> &tops) {
  auto mod = getOperation();

  if (topModuleName.getValue().empty()) {
    // Automatically infer top modules from instance graph.
    auto topLevelNodes = instanceGraph->getInferredTopLevelNodes();
    if (failed(topLevelNodes))
      return mod.emitError()
             << "failed to infer top-level modules from instance graph";

    // Collect all ModuleOpInterface instances from top-level nodes.
    for (auto *node : *topLevelNodes) {
      if (auto module = node->getModule())
        tops.push_back(module);
    }

    if (tops.empty())
      return mod.emitError() << "no top-level modules found in instance graph";
  } else {
    // Use user-specified top module name.
    auto *node = instanceGraph->lookup(
        mlir::StringAttr::get(mod.getContext(), topModuleName.getValue()));
    if (!node)
      return mod.emitError()
             << "top module '" << topModuleName.getValue() << "' not found";

    tops.push_back(node->getModule());
  }

  return success();
}
261
262LogicalResult PrintResourceUsageAnalysisPass::printAnalysisResult(
263 ResourceUsageAnalysis &analysis, igraph::ModuleOpInterface top,
264 llvm::raw_ostream *os, llvm::json::OStream *jsonOS) {
265 auto *usage = analysis.getResourceUsage(top);
266 if (!usage)
267 return failure();
268
269 if (jsonOS) {
270 usage->emitJSON(jsonOS->rawValueBegin());
271 jsonOS->rawValueEnd();
272 } else if (os) {
273 auto &stream = *os;
274 stream << "Resource Usage Analysis for module: "
275 << usage->moduleName.getValue() << "\n";
276 stream << "========================================\n";
277 stream << "Total:\n";
278
279 // Sort resource counts by name for consistent output.
280 SmallVector<std::pair<StringRef, uint64_t>> sortedCounts;
281 for (const auto &count : usage->getTotal().getCounts())
282 sortedCounts.emplace_back(count.getKey(), count.second);
283 llvm::sort(sortedCounts,
284 [](const auto &a, const auto &b) { return a.first < b.first; });
285
286 // Find the maximum name length for aligned formatting.
287 size_t maxNameLen = 0;
288 for (const auto &[name, count] : sortedCounts)
289 maxNameLen = std::max(maxNameLen, name.size());
290
291 // Print with aligned columns.
292 for (const auto &[name, count] : sortedCounts)
293 stream << " " << name << ": "
294 << std::string(maxNameLen - name.size(), ' ') << count << "\n";
295 stream << "\n";
296 }
297
298 return success();
299}
300
/// Run the analysis and emit the report for every selected top module.
void PrintResourceUsageAnalysisPass::runOnOperation() {
  auto &resourceUsage = getAnalysis<ResourceUsageAnalysis>();
  auto *instanceGraph = resourceUsage.getInstanceGraph();

  // Determine which modules to analyze.
  SmallVector<igraph::ModuleOpInterface> tops;
  if (failed(getTopModules(instanceGraph, tops)))
    return signalPassFailure();

  // Open output file.
  std::string error;
  auto file = mlir::openOutputFile(outputFile.getValue(), &error);
  if (!file) {
    llvm::errs() << error;
    return signalPassFailure();
  }

  auto &os = file->os();
  // In JSON mode all top-module reports are wrapped in a single JSON array.
  std::unique_ptr<llvm::json::OStream> jsonOS;
  if (emitJSON.getValue()) {
    jsonOS = std::make_unique<llvm::json::OStream>(os);
    jsonOS->arrayBegin();
  }

  // Ensure JSON array is properly closed on exit (including the early
  // signalPassFailure return below).
  auto closeJson = llvm::scope_exit([&]() {
    if (jsonOS)
      jsonOS->arrayEnd();
  });

  // Print resource usage for each top module. Exactly one sink is passed:
  // the raw stream in text mode, the JSON writer in JSON mode.
  for (auto top : tops) {
    if (failed(printAnalysisResult(resourceUsage, top, jsonOS ? nullptr : &os,
                                   jsonOS.get())))
      return signalPassFailure();
  }

  // Keep the output file; the analysis did not modify the IR.
  file->keep();
  markAllAnalysesPreserved();
}
assert(baseType &&"element must be base type")
static llvm::json::Object getModuleResourceUsageJSON(const ResourceUsageAnalysis::ResourceUsage &usage)
Convert ResourceUsage to JSON object.
static bool accumulateResourceCounts(Operation *op, llvm::StringMap< uint64_t > &counts)
Accumulate resource counts for an operation if it's a tracked resource type.
HW-specific instance graph with a virtual entry node linking to all publicly visible modules.
This graph tracks modules and where they are instantiated.
FailureOr< llvm::ArrayRef< InstanceGraphNode * > > getInferredTopLevelNodes()
Get the nodes corresponding to the inferred top-level modules of a circuit.
InstanceGraphNode * lookup(ModuleOpInterface op)
Look up an InstanceGraphNode for a module.
Analysis that computes resource usage for Synth dialect operations.
DenseMap< StringAttr, std::unique_ptr< ModuleResourceUsage > > designUsageCache
Cache of computed resource usage per module.
ModuleResourceUsage * getResourceUsage(igraph::ModuleOpInterface module)
Get resource usage for a module.
igraph::InstanceGraph * instanceGraph
Instance graph for module hierarchy traversal.
Direction get(bool isOutput)
Returns an output direction if isOutput is true, otherwise returns an input direction.
Definition CalyxOps.cpp:56
void error(Twine message)
Definition LSPUtils.cpp:16
The InstanceGraph op interface, see InstanceGraphInterface.td for more details.
Definition synth.py:1
Resource usage for a single module, including local and total counts.