ElaborationPass.cpp
1//===- ElaborationPass.cpp - RTG ElaborationPass implementation -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass elaborates the random parts of the RTG dialect.
10// It performs randomization top-down, i.e., random constructs in a sequence
11// that is invoked multiple times can yield different randomization results
12// for each invocation.
13//
14//===----------------------------------------------------------------------===//
15
16#include "circt/Dialect/RTG/IR/RTGOps.h"
17#include "circt/Dialect/RTG/IR/RTGVisitors.h"
18#include "circt/Dialect/RTG/Transforms/RTGPasses.h"
20#include "mlir/Dialect/Index/IR/IndexDialect.h"
21#include "mlir/Dialect/Index/IR/IndexOps.h"
22#include "mlir/Dialect/SCF/IR/SCF.h"
23#include "mlir/IR/IRMapping.h"
24#include "mlir/IR/PatternMatch.h"
25#include "llvm/ADT/DenseMapInfoVariant.h"
26#include "llvm/Support/Debug.h"
27#include <queue>
28#include <random>
29
30namespace circt {
31namespace rtg {
32#define GEN_PASS_DEF_ELABORATIONPASS
33#include "circt/Dialect/RTG/Transforms/RTGPasses.h.inc"
34} // namespace rtg
35} // namespace circt
36
37using namespace mlir;
38using namespace circt;
39using namespace circt::rtg;
40using llvm::MapVector;
41
42#define DEBUG_TYPE "rtg-elaboration"
43
44//===----------------------------------------------------------------------===//
45// Uniform Distribution Helper
46//
47// Simplified version of
48// https://github.com/llvm/llvm-project/blob/main/libcxx/include/__random/uniform_int_distribution.h
49// We use our custom version here to get the same results when compiled with
50// different compiler versions and standard libraries.
51//===----------------------------------------------------------------------===//
52
53static uint32_t computeMask(size_t w) {
54 size_t n = w / 32 + (w % 32 != 0);
55 size_t w0 = w / n;
56 return w0 > 0 ? uint32_t(~0) >> (32 - w0) : 0;
57}
58
59/// Get a number uniformly at random in the specified closed range [a, b].
60static uint32_t getUniformlyInRange(std::mt19937 &rng, uint32_t a, uint32_t b) {
61 const uint32_t diff = b - a + 1;
62 if (diff == 1)
63 return a;
64
65 const uint32_t digits = std::numeric_limits<uint32_t>::digits;
66 if (diff == 0)
67 return rng();
68
69 uint32_t width = digits - llvm::countl_zero(diff) - 1;
70 if ((diff & (std::numeric_limits<uint32_t>::max() >> (digits - width))) != 0)
71 ++width;
72
73 uint32_t mask = computeMask(width);
74 uint32_t u;
75 do {
76 u = rng() & mask;
77 } while (u >= diff);
78
79 return u + a;
80}
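// Example (editorial sketch): for the range [a, b] = [5, 10] we get diff = 6,
// so three bits are needed and the mask is computeMask(3) = 0b111. Candidates
// rng() & 0b111 are rejected until one is < 6, which keeps the result
// unbiased; the accepted value u yields 5 + u. With a fixed seed the draws are
// reproducible across compilers and standard libraries:
//
//   std::mt19937 rng(42);                         // fixed seed
//   uint32_t v = getUniformlyInRange(rng, 5, 10); // v is in [5, 10]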
81
82//===----------------------------------------------------------------------===//
83// Elaborator Value
84//===----------------------------------------------------------------------===//
85
86namespace {
87struct BagStorage;
88struct SequenceStorage;
89struct SetStorage;
90
91/// Represents a unique virtual register.
92struct VirtualRegister {
93 VirtualRegister(uint64_t id, ArrayAttr allowedRegs)
94 : id(id), allowedRegs(allowedRegs) {}
95
96 bool operator==(const VirtualRegister &other) const {
97 assert(
98 (id != other.id ||
99 allowedRegs == other.allowedRegs) &&
100 "instances with the same ID must have the same allowed registers");
101 return id == other.id;
102 }
103
104 // The ID of this virtual register.
105 uint64_t id;
106
107 // The list of fixed registers allowed to be selected for this virtual
108 // register.
109 ArrayAttr allowedRegs;
110};
111
112struct LabelValue {
113 LabelValue(StringAttr name, uint64_t id = 0) : name(name), id(id) {}
114
115 bool operator==(const LabelValue &other) const {
116 return name == other.name && id == other.id;
117 }
118
119 /// The label name. For unique labels, this is just the prefix.
120 StringAttr name;
121
122 /// Standard label declarations always have an ID of 0; unique labels get a
123 /// nonzero ID.
123 uint64_t id;
124};
125
126/// The type of an elaborated value: a variant over all supported value kinds.
127using ElaboratorValue =
128 std::variant<TypedAttr, BagStorage *, bool, size_t, SequenceStorage *,
129 SetStorage *, VirtualRegister, LabelValue>;
130
131// NOLINTNEXTLINE(readability-identifier-naming)
132llvm::hash_code hash_value(const VirtualRegister &val) {
133 return llvm::hash_value(val.id);
134}
135
136// NOLINTNEXTLINE(readability-identifier-naming)
137llvm::hash_code hash_value(const LabelValue &val) {
138 return llvm::hash_combine(val.id, val.name);
139}
140
141// NOLINTNEXTLINE(readability-identifier-naming)
142llvm::hash_code hash_value(const ElaboratorValue &val) {
143 return std::visit(
144 [&val](const auto &alternative) {
145 // Include the variant index in the hash to make sure the same payload
146 // stored as different alternatives does not collide.
147 return llvm::hash_combine(val.index(), alternative);
148 },
149 val);
150}
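// Example (sketch): an elaborated index value is stored directly in the
// variant as its size_t alternative, and queries go through the std::variant
// API; hash_value above additionally mixes in the alternative's index (3 for
// size_t) so equal payloads of different alternatives hash differently:
//
//   ElaboratorValue v = size_t(42);
//   bool isIndex = std::holds_alternative<size_t>(v); // true
//   llvm::hash_code h = hash_value(v);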
151
152} // namespace
153
154namespace llvm {
155
156template <>
157struct DenseMapInfo<bool> {
158 static inline unsigned getEmptyKey() { return false; }
159 static inline unsigned getTombstoneKey() { return true; }
160 static unsigned getHashValue(const bool &val) { return val * 37U; }
161
162 static bool isEqual(const bool &lhs, const bool &rhs) { return lhs == rhs; }
163};
164
165template <>
166struct DenseMapInfo<VirtualRegister> {
167 static inline VirtualRegister getEmptyKey() {
168 return VirtualRegister(0, ArrayAttr());
169 }
170 static inline VirtualRegister getTombstoneKey() {
171 return VirtualRegister(~0, ArrayAttr());
172 }
173 static unsigned getHashValue(const VirtualRegister &val) {
174 return llvm::hash_combine(val.id, val.allowedRegs);
175 }
176
177 static bool isEqual(const VirtualRegister &lhs, const VirtualRegister &rhs) {
178 return lhs == rhs;
179 }
180};
181
182template <>
183struct DenseMapInfo<LabelValue> {
184 static inline LabelValue getEmptyKey() { return LabelValue(StringAttr(), 0); }
185 static inline LabelValue getTombstoneKey() {
186 return LabelValue(StringAttr(), ~0);
187 }
188 static unsigned getHashValue(const LabelValue &val) {
189 return llvm::hash_combine(val.name, val.id);
190 }
191
192 static bool isEqual(const LabelValue &lhs, const LabelValue &rhs) {
193 return lhs == rhs;
194 }
195};
196
197} // namespace llvm
198
199//===----------------------------------------------------------------------===//
200// Elaborator Value Storages and Internalization
201//===----------------------------------------------------------------------===//
202
203namespace {
204
205/// Lightweight object to be used as the key for internalization sets. It caches
206/// the hashcode of the internalized object and a pointer to it. This allows
207/// delaying the allocation and construction of the actual object so that it
208/// only happens if the object is not already in the set.
209template <typename StorageTy>
210struct HashedStorage {
211 HashedStorage(unsigned hashcode = 0, StorageTy *storage = nullptr)
212 : hashcode(hashcode), storage(storage) {}
213
214 unsigned hashcode;
215 StorageTy *storage;
216};
217
218/// A DenseMapInfo implementation to support 'insert_as' for the internalization
219/// sets. When comparing two 'HashedStorage's we can just compare the already
220/// internalized storage pointers, otherwise we have to call the costly
221/// 'isEqual' method.
222template <typename StorageTy>
223struct StorageKeyInfo {
224 static inline HashedStorage<StorageTy> getEmptyKey() {
225 return HashedStorage<StorageTy>(0,
226 DenseMapInfo<StorageTy *>::getEmptyKey());
227 }
228 static inline HashedStorage<StorageTy> getTombstoneKey() {
229 return HashedStorage<StorageTy>(
230 0, DenseMapInfo<StorageTy *>::getTombstoneKey());
231 }
232
233 static inline unsigned getHashValue(const HashedStorage<StorageTy> &key) {
234 return key.hashcode;
235 }
236 static inline unsigned getHashValue(const StorageTy &key) {
237 return key.hashcode;
238 }
239
240 static inline bool isEqual(const HashedStorage<StorageTy> &lhs,
241 const HashedStorage<StorageTy> &rhs) {
242 return lhs.storage == rhs.storage;
243 }
244 static inline bool isEqual(const StorageTy &lhs,
245 const HashedStorage<StorageTy> &rhs) {
246 if (isEqual(rhs, getEmptyKey()) || isEqual(rhs, getTombstoneKey()))
247 return false;
248
249 return lhs.isEqual(rhs.storage);
250 }
251};
252
253/// Storage object for an '!rtg.set<T>'.
254struct SetStorage {
255 SetStorage(SetVector<ElaboratorValue> &&set, Type type)
256 : hashcode(llvm::hash_combine(
257 type, llvm::hash_combine_range(set.begin(), set.end()))),
258 set(std::move(set)), type(type) {}
259
260 bool isEqual(const SetStorage *other) const {
261 return hashcode == other->hashcode && set == other->set &&
262 type == other->type;
263 }
264
265 // The cached hashcode to avoid repeated computations.
266 const unsigned hashcode;
267
268 // Stores the elaborated values contained in the set.
269 const SetVector<ElaboratorValue> set;
270
271 // Store the set type so that we can materialize this evaluated value
272 // even when the set is empty.
273 const Type type;
274};
275
276/// Storage object for an '!rtg.bag<T>'.
277struct BagStorage {
278 BagStorage(MapVector<ElaboratorValue, uint64_t> &&bag, Type type)
279 : hashcode(llvm::hash_combine(
280 type, llvm::hash_combine_range(bag.begin(), bag.end()))),
281 bag(std::move(bag)), type(type) {}
282
283 bool isEqual(const BagStorage *other) const {
284 return hashcode == other->hashcode && llvm::equal(bag, other->bag) &&
285 type == other->type;
286 }
287
288 // The cached hashcode to avoid repeated computations.
289 const unsigned hashcode;
290
291 // Stores the elaborated values contained in the bag with their number of
292 // occurrences.
293 const MapVector<ElaboratorValue, uint64_t> bag;
294
295 // Store the bag type so that we can materialize this evaluated value
296 // even when the bag is empty.
297 const Type type;
298};
299
300/// Storage object for an '!rtg.sequence'.
301struct SequenceStorage {
302 SequenceStorage(StringRef name, StringAttr familyName,
303 SmallVector<ElaboratorValue> &&args)
304 : hashcode(llvm::hash_combine(
305 name, familyName,
306 llvm::hash_combine_range(args.begin(), args.end()))),
307 name(name), familyName(familyName), args(std::move(args)) {}
308
309 bool isEqual(const SequenceStorage *other) const {
310 return hashcode == other->hashcode && name == other->name &&
311 familyName == other->familyName && args == other->args;
312 }
313
314 // The cached hashcode to avoid repeated computations.
315 const unsigned hashcode;
316
317 // The name of this fully substituted and elaborated sequence.
318 const StringRef name;
319
320 // The name of the sequence family this sequence is derived from.
321 const StringAttr familyName;
322
323 // The elaborator values used during substitution of the sequence family.
324 const SmallVector<ElaboratorValue> args;
325};
326
327/// An 'Internalizer' object internalizes storages and takes ownership of them.
328/// When the internalizer object is destroyed, all owned storages are also
329/// deallocated and thus must not be accessed anymore.
330class Internalizer {
331public:
332 /// Internalize a storage of type `StorageTy` constructed with arguments
333 /// `args`. The pointers returned by this method can be used to compare
334 /// objects when, e.g., computing set differences, uniquing the elements in a
335 /// set, etc. Otherwise, we'd need to do a deep value comparison in those
336 /// situations.
337 template <typename StorageTy, typename... Args>
338 StorageTy *internalize(Args &&...args) {
339 StorageTy storage(std::forward<Args>(args)...);
340
341 auto existing = getInternSet<StorageTy>().insert_as(
342 HashedStorage<StorageTy>(storage.hashcode), storage);
343 StorageTy *&storagePtr = existing.first->storage;
344 if (existing.second)
345 storagePtr =
346 new (allocator.Allocate<StorageTy>()) StorageTy(std::move(storage));
347
348 return storagePtr;
349 }
350
351private:
352 template <typename StorageTy>
353 DenseSet<HashedStorage<StorageTy>, StorageKeyInfo<StorageTy>> &
354 getInternSet() {
355 if constexpr (std::is_same_v<StorageTy, SetStorage>)
356 return internedSets;
357 else if constexpr (std::is_same_v<StorageTy, BagStorage>)
358 return internedBags;
359 else if constexpr (std::is_same_v<StorageTy, SequenceStorage>)
360 return internedSequences;
361 else
362 static_assert(!sizeof(StorageTy),
363 "no intern set available for this storage type.");
364 }
365
366 // This allocator allocates on the heap. It automatically deallocates all
367 // objects it allocated once the allocator itself is destroyed.
368 llvm::BumpPtrAllocator allocator;
369
370 // The sets holding the internalized objects. We use one set per storage type
371 // such that we can have a simpler equality checking function (no need to
372 // compare some sort of TypeIDs).
373 DenseSet<HashedStorage<SetStorage>, StorageKeyInfo<SetStorage>> internedSets;
374 DenseSet<HashedStorage<BagStorage>, StorageKeyInfo<BagStorage>> internedBags;
375 DenseSet<HashedStorage<SequenceStorage>, StorageKeyInfo<SequenceStorage>>
376 internedSequences;
377};
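// Example (editorial sketch; 'setType' is a placeholder for some '!rtg.set'
// type): internalizing two structurally identical storages returns the same
// pointer, so later set/bag operations can compare pointers instead of doing
// deep value comparisons.
//
//   Internalizer internalizer;
//   SetVector<ElaboratorValue> a, b;
//   a.insert(size_t(1));
//   b.insert(size_t(1));
//   auto *s1 = internalizer.internalize<SetStorage>(std::move(a), setType);
//   auto *s2 = internalizer.internalize<SetStorage>(std::move(b), setType);
//   assert(s1 == s2 && "same contents intern to the same storage");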
378
379} // namespace
380
381#ifndef NDEBUG
382
383static llvm::raw_ostream &operator<<(llvm::raw_ostream &os,
384 const ElaboratorValue &value);
385
386static void print(TypedAttr val, llvm::raw_ostream &os) {
387 os << "<attr " << val << ">";
388}
389
390static void print(BagStorage *val, llvm::raw_ostream &os) {
391 os << "<bag {";
392 llvm::interleaveComma(val->bag, os,
393 [&](const std::pair<ElaboratorValue, uint64_t> &el) {
394 os << el.first << " -> " << el.second;
395 });
396 os << "} at " << val << ">";
397}
398
399static void print(bool val, llvm::raw_ostream &os) {
400 os << "<bool " << (val ? "true" : "false") << ">";
401}
402
403static void print(size_t val, llvm::raw_ostream &os) {
404 os << "<index " << val << ">";
405}
406
407static void print(SequenceStorage *val, llvm::raw_ostream &os) {
408 os << "<sequence @" << val->name << " derived from @"
409 << val->familyName.getValue() << "(";
410 llvm::interleaveComma(val->args, os,
411 [&](const ElaboratorValue &val) { os << val; });
412 os << ") at " << val << ">";
413}
414
415static void print(SetStorage *val, llvm::raw_ostream &os) {
416 os << "<set {";
417 llvm::interleaveComma(val->set, os,
418 [&](const ElaboratorValue &val) { os << val; });
419 os << "} at " << val << ">";
420}
421
422static void print(const VirtualRegister &val, llvm::raw_ostream &os) {
423 os << "<virtual-register " << val.id << " " << val.allowedRegs << ">";
424}
425
426static void print(const LabelValue &val, llvm::raw_ostream &os) {
427 os << "<label " << val.id << " " << val.name << ">";
428}
429
430static llvm::raw_ostream &operator<<(llvm::raw_ostream &os,
431 const ElaboratorValue &value) {
432 std::visit([&](auto val) { print(val, os); }, value);
433
434 return os;
435}
436
437#endif
438
439//===----------------------------------------------------------------------===//
440// Elaborator Value Materialization
441//===----------------------------------------------------------------------===//
442
443namespace {
444
445/// Construct an SSA value from a given elaborated value.
446class Materializer {
447public:
448 Materializer(OpBuilder builder) : builder(builder) {}
449
450 /// Materialize IR representing the provided `ElaboratorValue` and return the
451 /// `Value` or a null value on failure.
452 Value materialize(ElaboratorValue val, Location loc,
453 std::queue<SequenceStorage *> &elabRequests,
454 function_ref<InFlightDiagnostic()> emitError) {
455 auto iter = materializedValues.find(val);
456 if (iter != materializedValues.end())
457 return iter->second;
458
459 LLVM_DEBUG(llvm::dbgs() << "Materializing " << val << "\n\n");
460
461 return std::visit(
462 [&](auto val) { return visit(val, loc, elabRequests, emitError); },
463 val);
464 }
465
466 /// If `op` is not in the same region as the materializer insertion point, a
467 /// clone is created at the materializer's insertion point by also
468 /// materializing the `ElaboratorValue`s for each operand just before it.
469 /// Otherwise, all operations after the materializer's insertion point are
470 /// deleted until `op` is reached. An error is returned if the operation is
471 /// before the insertion point.
472 LogicalResult materialize(Operation *op,
473 DenseMap<Value, ElaboratorValue> &state,
474 std::queue<SequenceStorage *> &elabRequests) {
475 if (op->getNumRegions() > 0)
476 return op->emitOpError("ops with nested regions must be elaborated away");
477
478 // We don't support opaque values. If an SSA value has a use-site, it needs
479 // an equivalent ElaboratorValue representation.
480 // NOTE: We could support cases where there is initially a use-site but that
481 // op is guaranteed to be deleted during elaboration. Or the use-sites are
482 // replaced with freshly materialized values from the ElaboratorValue. But
483 // then, why can't we delete the value defining op?
484 for (auto res : op->getResults())
485 if (!res.use_empty())
486 return op->emitOpError(
487 "ops with results that have uses are not supported");
488
489 if (op->getParentRegion() == builder.getBlock()->getParent()) {
490 // We are doing in-place materialization, so mark all ops deleted until we
491 // reach the one to be materialized and modify it in-place.
492 deleteOpsUntil([&](auto iter) { return &*iter == op; });
493
494 if (builder.getInsertionPoint() == builder.getBlock()->end())
495 return op->emitError("operation did not occur after the current "
496 "materializer insertion point");
497
498 LLVM_DEBUG(llvm::dbgs() << "Modifying in-place: " << *op << "\n\n");
499 } else {
500 LLVM_DEBUG(llvm::dbgs() << "Materializing a clone of " << *op << "\n\n");
501 op = builder.clone(*op);
502 builder.setInsertionPoint(op);
503 }
504
505 for (auto &operand : op->getOpOperands()) {
506 auto emitError = [&]() {
507 auto diag = op->emitError();
508 diag.attachNote(op->getLoc())
509 << "while materializing value for operand#"
510 << operand.getOperandNumber();
511 return diag;
512 };
513
514 Value val = materialize(state.at(operand.get()), op->getLoc(),
515 elabRequests, emitError);
516 if (!val)
517 return failure();
518
519 operand.set(val);
520 }
521
522 builder.setInsertionPointAfter(op);
523 return success();
524 }
525
526 /// Should be called once the `Region` is successfully materialized. No calls
527 /// to `materialize` should happen after this anymore.
528 void finalize() {
529 deleteOpsUntil([](auto iter) { return false; });
530
531 for (auto *op : llvm::reverse(toDelete))
532 op->erase();
533 }
534
535private:
536 void deleteOpsUntil(function_ref<bool(Block::iterator)> stop) {
537 auto ip = builder.getInsertionPoint();
538 while (ip != builder.getBlock()->end() && !stop(ip)) {
539 LLVM_DEBUG(llvm::dbgs() << "Marking to be deleted: " << *ip << "\n\n");
540 toDelete.push_back(&*ip);
541
542 builder.setInsertionPointAfter(&*ip);
543 ip = builder.getInsertionPoint();
544 }
545 }
546
547 Value visit(TypedAttr val, Location loc,
548 std::queue<SequenceStorage *> &elabRequests,
549 function_ref<InFlightDiagnostic()> emitError) {
550 // For index attributes (and arithmetic operations on them) we use the
551 // index dialect.
552 if (auto intAttr = dyn_cast<IntegerAttr>(val);
553 intAttr && isa<IndexType>(val.getType())) {
554 Value res = builder.create<index::ConstantOp>(loc, intAttr);
555 materializedValues[val] = res;
556 return res;
557 }
558
559 // For any other attribute, we just call the materializer of the dialect
560 // defining that attribute.
561 auto *op =
562 val.getDialect().materializeConstant(builder, val, val.getType(), loc);
563 if (!op) {
564 emitError() << "materializer of dialect '"
565 << val.getDialect().getNamespace()
566 << "' unable to materialize value for attribute '" << val
567 << "'";
568 return Value();
569 }
570
571 Value res = op->getResult(0);
572 materializedValues[val] = res;
573 return res;
574 }
575
576 Value visit(size_t val, Location loc,
577 std::queue<SequenceStorage *> &elabRequests,
578 function_ref<InFlightDiagnostic()> emitError) {
579 Value res = builder.create<index::ConstantOp>(loc, val);
580 materializedValues[val] = res;
581 return res;
582 }
583
584 Value visit(bool val, Location loc,
585 std::queue<SequenceStorage *> &elabRequests,
586 function_ref<InFlightDiagnostic()> emitError) {
587 Value res = builder.create<index::BoolConstantOp>(loc, val);
588 materializedValues[val] = res;
589 return res;
590 }
591
592 Value visit(SetStorage *val, Location loc,
593 std::queue<SequenceStorage *> &elabRequests,
594 function_ref<InFlightDiagnostic()> emitError) {
595 SmallVector<Value> elements;
596 elements.reserve(val->set.size());
597 for (auto el : val->set) {
598 auto materialized = materialize(el, loc, elabRequests, emitError);
599 if (!materialized)
600 return Value();
601
602 elements.push_back(materialized);
603 }
604
605 auto res = builder.create<SetCreateOp>(loc, val->type, elements);
606 materializedValues[val] = res;
607 return res;
608 }
609
610 Value visit(BagStorage *val, Location loc,
611 std::queue<SequenceStorage *> &elabRequests,
612 function_ref<InFlightDiagnostic()> emitError) {
613 SmallVector<Value> values, weights;
614 values.reserve(val->bag.size());
615 weights.reserve(val->bag.size());
616 for (auto [val, weight] : val->bag) {
617 auto materializedVal = materialize(val, loc, elabRequests, emitError);
618 auto materializedWeight =
619 materialize(weight, loc, elabRequests, emitError);
620 if (!materializedVal || !materializedWeight)
621 return Value();
622
623 values.push_back(materializedVal);
624 weights.push_back(materializedWeight);
625 }
626
627 auto res = builder.create<BagCreateOp>(loc, val->type, values, weights);
628 materializedValues[val] = res;
629 return res;
630 }
631
632 Value visit(SequenceStorage *val, Location loc,
633 std::queue<SequenceStorage *> &elabRequests,
634 function_ref<InFlightDiagnostic()> emitError) {
635 elabRequests.push(val);
636 return builder.create<SequenceClosureOp>(loc, val->name, ValueRange());
637 }
638
639 Value visit(const VirtualRegister &val, Location loc,
640 std::queue<SequenceStorage *> &elabRequests,
641 function_ref<InFlightDiagnostic()> emitError) {
642 auto res = builder.create<VirtualRegisterOp>(loc, val.allowedRegs);
643 materializedValues[val] = res;
644 return res;
645 }
646
647 Value visit(const LabelValue &val, Location loc,
648 std::queue<SequenceStorage *> &elabRequests,
649 function_ref<InFlightDiagnostic()> emitError) {
650 if (val.id == 0) {
651 auto res = builder.create<LabelDeclOp>(loc, val.name, ValueRange());
652 materializedValues[val] = res;
653 return res;
654 }
655
656 auto res = builder.create<LabelUniqueDeclOp>(loc, val.name, ValueRange());
657 materializedValues[val] = res;
658 return res;
659 }
660
661private:
662 /// Cache values we have already materialized to reuse them later. We start
663 /// with an insertion point at the start of the block and cache the (updated)
664 /// insertion point such that future materializations can also reuse previous
665 /// materializations without running into dominance issues (or requiring
666 /// additional checks to avoid them).
667 DenseMap<ElaboratorValue, Value> materializedValues;
668
669 /// Cache the builder to continue insertions at their current insertion point
670 /// for the reason stated above.
671 OpBuilder builder;
672
673 SmallVector<Operation *> toDelete;
674};
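// Example (sketch): because materialized values are cached by ElaboratorValue,
// materializing size_t(5) twice for two different operands in the same region
// produces a single index constant that both operands end up using, and the
// cached insertion point lets later materializations reuse it without running
// into dominance issues.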
675
676//===----------------------------------------------------------------------===//
677// Elaboration Visitor
678//===----------------------------------------------------------------------===//
679
680/// Used to signal to the elaboration driver whether the operation should be
681/// removed.
682enum class DeletionKind { Keep, Delete };
683
684/// Elaborator state that should be shared by all elaborator instances.
685struct ElaboratorSharedState {
686 ElaboratorSharedState(SymbolTable &table, unsigned seed)
687 : table(table), rng(seed) {}
688
689 SymbolTable &table;
690 std::mt19937 rng;
691 Namespace names;
692 Namespace labelNames;
693 Internalizer internalizer;
694
695 /// The worklist used to keep track of the test and sequence operations to
696 /// make sure they are processed top-down (BFS traversal).
697 std::queue<SequenceStorage *> worklist;
698
699 uint64_t virtualRegisterID = 0;
700 uint64_t uniqueLabelID = 1;
701};
702
703/// Interprets the IR to perform and lower the represented randomizations.
704class Elaborator : public RTGOpVisitor<Elaborator, FailureOr<DeletionKind>> {
705public:
706 using RTGBase = RTGOpVisitor<Elaborator, FailureOr<DeletionKind>>;
707 using RTGBase::visitOp;
708
709 Elaborator(ElaboratorSharedState &sharedState, Materializer &materializer)
710 : sharedState(sharedState), materializer(materializer) {}
711
712 template <typename ValueTy>
713 inline ValueTy get(Value val) const {
714 return std::get<ValueTy>(state.at(val));
715 }
716
717 FailureOr<DeletionKind> visitConstantLike(Operation *op) {
718 assert(op->hasTrait<OpTrait::ConstantLike>() &&
719 "op is expected to be constant-like");
720
721 SmallVector<OpFoldResult, 1> result;
722 auto foldResult = op->fold(result);
723 (void)foldResult; // Make sure there is a user when assertions are off.
724 assert(succeeded(foldResult) &&
725 "constant folder of a constant-like must always succeed");
726 auto attr = dyn_cast<TypedAttr>(result[0].dyn_cast<Attribute>());
727 if (!attr)
728 return op->emitError(
729 "only typed attributes supported for constant-like operations");
730
731 auto intAttr = dyn_cast<IntegerAttr>(attr);
732 if (intAttr && isa<IndexType>(attr.getType()))
733 state[op->getResult(0)] = size_t(intAttr.getInt());
734 else if (intAttr && intAttr.getType().isSignlessInteger(1))
735 state[op->getResult(0)] = bool(intAttr.getInt());
736 else
737 state[op->getResult(0)] = attr;
738
739 return DeletionKind::Delete;
740 }
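  // Example: folding an index constant yields an IntegerAttr of index type, so
  // its result is stored as size_t; an i1 constant is stored as bool; any other
  // constant (e.g., a fixed register attribute) is kept as its TypedAttr.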
741
742 /// Print a nice error message for operations we don't support yet.
743 FailureOr<DeletionKind> visitUnhandledOp(Operation *op) {
744 return op->emitOpError("elaboration not supported");
745 }
746
747 FailureOr<DeletionKind> visitExternalOp(Operation *op) {
748 if (op->hasTrait<OpTrait::ConstantLike>())
749 return visitConstantLike(op);
750
751 // TODO: This only exists so we can write tests for this pass without adding
752 // support for more operations; remove it once it is no longer needed for
753 // testing.
754 if (op->use_empty())
755 return DeletionKind::Keep;
756
757 return visitUnhandledOp(op);
758 }
759
760 FailureOr<DeletionKind> visitOp(SequenceClosureOp op) {
761 SmallVector<ElaboratorValue> args;
762 for (auto arg : op.getArgs())
763 args.push_back(state.at(arg));
764
765 auto familyName = op.getSequenceAttr();
766 auto name = sharedState.names.newName(familyName.getValue());
767 state[op.getResult()] =
768 sharedState.internalizer.internalize<SequenceStorage>(name, familyName,
769 std::move(args));
770 return DeletionKind::Delete;
771 }
772
773 FailureOr<DeletionKind> visitOp(InvokeSequenceOp op) {
774 return DeletionKind::Keep;
775 }
776
777 FailureOr<DeletionKind> visitOp(SetCreateOp op) {
778 SetVector<ElaboratorValue> set;
779 for (auto val : op.getElements())
780 set.insert(state.at(val));
781
782 state[op.getSet()] = sharedState.internalizer.internalize<SetStorage>(
783 std::move(set), op.getSet().getType());
784 return DeletionKind::Delete;
785 }
786
787 FailureOr<DeletionKind> visitOp(SetSelectRandomOp op) {
788 auto set = get<SetStorage *>(op.getSet())->set;
789
790 size_t selected;
791 if (auto intAttr =
792 op->getAttrOfType<IntegerAttr>("rtg.elaboration_custom_seed")) {
793 std::mt19937 customRng(intAttr.getInt());
794 selected = getUniformlyInRange(customRng, 0, set.size() - 1);
795 } else {
796 selected = getUniformlyInRange(sharedState.rng, 0, set.size() - 1);
797 }
798
799 state[op.getResult()] = set[selected];
800 return DeletionKind::Delete;
801 }
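  // Example: for a set with four elements the index is drawn via
  // getUniformlyInRange(rng, 0, 3). Attaching an integer attribute named
  // "rtg.elaboration_custom_seed" to the op makes the choice independent of
  // the global pass seed, e.g., to keep tests reproducible.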
802
803 FailureOr<DeletionKind> visitOp(SetDifferenceOp op) {
804 auto original = get<SetStorage *>(op.getOriginal())->set;
805 auto diff = get<SetStorage *>(op.getDiff())->set;
806
807 SetVector<ElaboratorValue> result(original);
808 result.set_subtract(diff);
809
810 state[op.getResult()] = sharedState.internalizer.internalize<SetStorage>(
811 std::move(result), op.getResult().getType());
812 return DeletionKind::Delete;
813 }
814
815 FailureOr<DeletionKind> visitOp(SetUnionOp op) {
816 SetVector<ElaboratorValue> result;
817 for (auto set : op.getSets())
818 result.set_union(get<SetStorage *>(set)->set);
819
820 state[op.getResult()] = sharedState.internalizer.internalize<SetStorage>(
821 std::move(result), op.getType());
822 return DeletionKind::Delete;
823 }
824
825 FailureOr<DeletionKind> visitOp(SetSizeOp op) {
826 auto size = get<SetStorage *>(op.getSet())->set.size();
827 state[op.getResult()] = size;
828 return DeletionKind::Delete;
829 }
830
831 FailureOr<DeletionKind> visitOp(BagCreateOp op) {
832 MapVector<ElaboratorValue, uint64_t> bag;
833 for (auto [val, multiple] :
834 llvm::zip(op.getElements(), op.getMultiples())) {
835 // If the multiple is not an elaborated index value (size_t), the
836 // elaboration must have already failed earlier (since we don't have
837 // unevaluated/opaque values).
838 bag[state.at(val)] += get<size_t>(multiple);
839 }
840
841 state[op.getBag()] = sharedState.internalizer.internalize<BagStorage>(
842 std::move(bag), op.getType());
843 return DeletionKind::Delete;
844 }
845
846 FailureOr<DeletionKind> visitOp(BagSelectRandomOp op) {
847 auto bag = get<BagStorage *>(op.getBag())->bag;
848
849 SmallVector<std::pair<ElaboratorValue, uint32_t>> prefixSum;
850 prefixSum.reserve(bag.size());
851 uint32_t accumulator = 0;
852 for (auto [val, weight] : bag) {
853 accumulator += weight;
854 prefixSum.push_back({val, accumulator});
855 }
856
857 std::mt19937 customRng(sharedState.rng()); // advance the shared RNG
858 if (auto intAttr =
859 op->getAttrOfType<IntegerAttr>("rtg.elaboration_custom_seed")) {
860 customRng = std::mt19937(intAttr.getInt());
861 }
862
863 auto idx = getUniformlyInRange(customRng, 0, accumulator - 1);
864 auto *iter = llvm::upper_bound(
865 prefixSum, idx,
866 [](uint32_t a, const std::pair<ElaboratorValue, uint32_t> &b) {
867 return a < b.second;
868 });
869
870 state[op.getResult()] = iter->first;
871 return DeletionKind::Delete;
872 }
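  // Example: for a bag {a -> 2, b -> 3} the prefix sums are [(a, 2), (b, 5)]
  // and idx is drawn uniformly from [0, 4]; idx in {0, 1} selects 'a' and idx
  // in {2, 3, 4} selects 'b', so elements are picked proportionally to their
  // weights.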
873
874 FailureOr<DeletionKind> visitOp(BagDifferenceOp op) {
875 auto original = get<BagStorage *>(op.getOriginal())->bag;
876 auto diff = get<BagStorage *>(op.getDiff())->bag;
877
878 MapVector<ElaboratorValue, uint64_t> result;
879 for (const auto &el : original) {
880 if (!diff.contains(el.first)) {
881 result.insert(el);
882 continue;
883 }
884
885 if (op.getInf())
886 continue;
887
888 auto toDiff = diff.lookup(el.first);
889 if (el.second <= toDiff)
890 continue;
891
892 result.insert({el.first, el.second - toDiff});
893 }
894
895 state[op.getResult()] = sharedState.internalizer.internalize<BagStorage>(
896 std::move(result), op.getType());
897 return DeletionKind::Delete;
898 }
899
900 FailureOr<DeletionKind> visitOp(BagUnionOp op) {
901 MapVector<ElaboratorValue, uint64_t> result;
902 for (auto bag : op.getBags()) {
903 auto val = get<BagStorage *>(bag)->bag;
904 for (auto [el, multiple] : val)
905 result[el] += multiple;
906 }
907
908 state[op.getResult()] = sharedState.internalizer.internalize<BagStorage>(
909 std::move(result), op.getType());
910 return DeletionKind::Delete;
911 }
912
913 FailureOr<DeletionKind> visitOp(BagUniqueSizeOp op) {
914 auto size = get<BagStorage *>(op.getBag())->bag.size();
915 state[op.getResult()] = size;
916 return DeletionKind::Delete;
917 }
918
919 FailureOr<DeletionKind> visitOp(FixedRegisterOp op) {
920 return visitConstantLike(op);
921 }
922
923 FailureOr<DeletionKind> visitOp(VirtualRegisterOp op) {
924 state[op.getResult()] = VirtualRegister(sharedState.virtualRegisterID++,
925 op.getAllowedRegsAttr());
926 return DeletionKind::Delete;
927 }
928
929 StringAttr substituteFormatString(StringAttr formatString,
930 ValueRange substitutes) const {
931 if (substitutes.empty() || formatString.empty())
932 return formatString;
933
934 auto original = formatString.getValue().str();
935 for (auto [i, subst] : llvm::enumerate(substitutes)) {
936 size_t startPos = 0;
937 std::string from = "{{" + std::to_string(i) + "}}";
938 while ((startPos = original.find(from, startPos)) != std::string::npos) {
939 auto substString = std::to_string(get<size_t>(subst));
940 original.replace(startPos, from.length(), substString);
941 }
942 }
943
944 return StringAttr::get(formatString.getContext(), original);
945 }
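  // Example: with the format string "label_{{0}}_{{1}}" and substitutes that
  // elaborated to the indices 3 and 7, every "{{0}}" becomes "3" and every
  // "{{1}}" becomes "7", yielding "label_3_7".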
946
947 FailureOr<DeletionKind> visitOp(LabelDeclOp op) {
948 auto substituted =
949 substituteFormatString(op.getFormatStringAttr(), op.getArgs());
950 sharedState.labelNames.add(substituted.getValue());
951 state[op.getLabel()] = LabelValue(substituted);
952 return DeletionKind::Delete;
953 }
954
955 FailureOr<DeletionKind> visitOp(LabelUniqueDeclOp op) {
956 state[op.getLabel()] = LabelValue(
957 substituteFormatString(op.getFormatStringAttr(), op.getArgs()),
958 sharedState.uniqueLabelID++);
959 return DeletionKind::Delete;
960 }
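  // Example: two unique label declarations with the format string "loop"
  // elaborate to LabelValue("loop", 1) and LabelValue("loop", 2); they remain
  // distinct values and are only assigned collision-free concrete names (via
  // the label namespace) at the end of elaborateModule.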
961
962 FailureOr<DeletionKind> visitOp(LabelOp op) { return DeletionKind::Keep; }
963
964 FailureOr<DeletionKind> visitOp(scf::IfOp op) {
965 bool cond = get<bool>(op.getCondition());
966 auto &toElaborate = cond ? op.getThenRegion() : op.getElseRegion();
967 if (toElaborate.empty())
968 return DeletionKind::Delete;
969
970 // Just reuse this elaborator for the nested region because we need access
971 // to the elaborated values outside the nested region (since it is not
972 // isolated from above) and we want to materialize the region inline, thus
973 // don't need a new materializer instance.
974 if (failed(elaborate(toElaborate)))
975 return failure();
976
977 // Map the results of the 'scf.if' to the yielded values.
978 for (auto [res, out] :
979 llvm::zip(op.getResults(),
980 toElaborate.front().getTerminator()->getOperands()))
981 state[res] = state.at(out);
982
983 return DeletionKind::Delete;
984 }
985
986 FailureOr<DeletionKind> visitOp(scf::ForOp op) {
987 if (!(std::holds_alternative<size_t>(state.at(op.getLowerBound())) &&
988 std::holds_alternative<size_t>(state.at(op.getStep())) &&
989 std::holds_alternative<size_t>(state.at(op.getUpperBound()))))
990 return op->emitOpError("can only elaborate index type iterator");
991
992 auto lowerBound = get<size_t>(op.getLowerBound());
993 auto step = get<size_t>(op.getStep());
994 auto upperBound = get<size_t>(op.getUpperBound());
995
996 // Prepare for first iteration by assigning the nested regions block
997 // arguments. We can just reuse this elaborator because we need access to
998 // values elaborated in the parent region anyway and materialize everything
999 // inline (i.e., don't need a new materializer).
1000 state[op.getInductionVar()] = lowerBound;
1001 for (auto [iterArg, initArg] :
1002 llvm::zip(op.getRegionIterArgs(), op.getInitArgs()))
1003 state[iterArg] = state.at(initArg);
1004
1005 // This loop performs the actual 'scf.for' loop iterations.
1006 for (size_t i = lowerBound; i < upperBound; i += step) {
1007 if (failed(elaborate(op.getBodyRegion())))
1008 return failure();
1009
1010 // Prepare for the next iteration by updating the mapping of the nested
1011 // regions block arguments
1012 state[op.getInductionVar()] = i + step;
1013 for (auto [iterArg, prevIterArg] :
1014 llvm::zip(op.getRegionIterArgs(),
1015 op.getBody()->getTerminator()->getOperands()))
1016 state[iterArg] = state.at(prevIterArg);
1017 }
1018
1019 // Transfer the previously yielded values to the for loop result values.
1020 for (auto [res, iterArg] :
1021 llvm::zip(op->getResults(), op.getRegionIterArgs()))
1022 state[res] = state.at(iterArg);
1023
1024 return DeletionKind::Delete;
1025 }
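  // Example: an 'scf.for' with lower bound 0, upper bound 3, and step 1 is
  // elaborated by interpreting the body three times with induction variable
  // values 0, 1, and 2; the iter_args are threaded through the values yielded
  // by each iteration, and the final values are bound to the op's results.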
1026
1027 FailureOr<DeletionKind> visitOp(scf::YieldOp op) {
1028 return DeletionKind::Delete;
1029 }
1030
1031 FailureOr<DeletionKind> visitOp(index::AddOp op) {
1032 size_t lhs = get<size_t>(op.getLhs());
1033 size_t rhs = get<size_t>(op.getRhs());
1034 state[op.getResult()] = lhs + rhs;
1035 return DeletionKind::Delete;
1036 }
1037
1038 FailureOr<DeletionKind> visitOp(index::CmpOp op) {
1039 size_t lhs = get<size_t>(op.getLhs());
1040 size_t rhs = get<size_t>(op.getRhs());
1041 bool result;
1042 switch (op.getPred()) {
1043 case index::IndexCmpPredicate::EQ:
1044 result = lhs == rhs;
1045 break;
1046 case index::IndexCmpPredicate::NE:
1047 result = lhs != rhs;
1048 break;
1049 case index::IndexCmpPredicate::ULT:
1050 result = lhs < rhs;
1051 break;
1052 case index::IndexCmpPredicate::ULE:
1053 result = lhs <= rhs;
1054 break;
1055 case index::IndexCmpPredicate::UGT:
1056 result = lhs > rhs;
1057 break;
1058 case index::IndexCmpPredicate::UGE:
1059 result = lhs >= rhs;
1060 break;
1061 default:
1062 return op->emitOpError("elaboration not supported");
1063 }
1064 state[op.getResult()] = result;
1065 return DeletionKind::Delete;
1066 }
1067
1068 FailureOr<DeletionKind> dispatchOpVisitor(Operation *op) {
1069 return TypeSwitch<Operation *, FailureOr<DeletionKind>>(op)
1070 .Case<
1071 // Index ops
1072 index::AddOp, index::CmpOp,
1073 // SCF ops
1074 scf::IfOp, scf::ForOp, scf::YieldOp>(
1075 [&](auto op) { return visitOp(op); })
1076 .Default([&](Operation *op) { return RTGBase::dispatchOpVisitor(op); });
1077 }
1078
1079 // NOLINTNEXTLINE(misc-no-recursion)
1080 LogicalResult elaborate(Region &region,
1081 ArrayRef<ElaboratorValue> regionArguments = {}) {
1082 if (region.getBlocks().size() > 1)
1083 return region.getParentOp()->emitOpError(
1084 "regions with more than one block are not supported");
1085
1086 for (auto [arg, elabArg] :
1087 llvm::zip(region.getArguments(), regionArguments))
1088 state[arg] = elabArg;
1089
1090 Block *block = &region.front();
1091 for (auto &op : *block) {
1092 auto result = dispatchOpVisitor(&op);
1093 if (failed(result))
1094 return failure();
1095
1096 if (*result == DeletionKind::Keep)
1097 if (failed(materializer.materialize(&op, state, sharedState.worklist)))
1098 return failure();
1099
1100 LLVM_DEBUG({
1101 llvm::dbgs() << "Elaborated " << op << " to\n[";
1102
1103 llvm::interleaveComma(op.getResults(), llvm::dbgs(), [&](auto res) {
1104 if (state.contains(res))
1105 llvm::dbgs() << state.at(res);
1106 else
1107 llvm::dbgs() << "unknown";
1108 });
1109
1110 llvm::dbgs() << "]\n\n";
1111 });
1112 }
1113
1114 return success();
1115 }
1116
1117private:
1118 // State to be shared between all elaborator instances.
1119 ElaboratorSharedState &sharedState;
1120
1121 // Allows us to materialize ElaboratorValues to the IR operations necessary to
1122 // obtain an SSA value representing that elaborated value.
1123 Materializer &materializer;
1124
1125 // A map from SSA values to a pointer of an interned elaborator value.
1126 DenseMap<Value, ElaboratorValue> state;
1127};
1128} // namespace
1129
1130//===----------------------------------------------------------------------===//
1131// Elaborator Pass
1132//===----------------------------------------------------------------------===//
1133
1134namespace {
1135struct ElaborationPass
1136 : public rtg::impl::ElaborationPassBase<ElaborationPass> {
1137 using Base::Base;
1138
1139 void runOnOperation() override;
1140 void cloneTargetsIntoTests(SymbolTable &table);
1141 LogicalResult elaborateModule(ModuleOp moduleOp, SymbolTable &table);
1142 LogicalResult inlineSequences(TestOp testOp, SymbolTable &table);
1143};
1144} // namespace
1145
1146void ElaborationPass::runOnOperation() {
1147 auto moduleOp = getOperation();
1148 SymbolTable table(moduleOp);
1149
1150 cloneTargetsIntoTests(table);
1151
1152 if (failed(elaborateModule(moduleOp, table)))
1153 return signalPassFailure();
1154}
1155
1156void ElaborationPass::cloneTargetsIntoTests(SymbolTable &table) {
1157 auto moduleOp = getOperation();
1158 for (auto target : llvm::make_early_inc_range(moduleOp.getOps<TargetOp>())) {
1159 for (auto test : moduleOp.getOps<TestOp>()) {
1160 // If the test requires nothing from a target, we can always run it.
1161 if (test.getTarget().getEntries().empty())
1162 continue;
1163
1164 // If the target requirements do not match, skip this test
1165 // TODO: allow target refinements, just not coarsening
1166 if (target.getTarget() != test.getTarget())
1167 continue;
1168
1169 IRRewriter rewriter(test);
1170 // Create a new test for the matched target
1171 auto newTest = cast<TestOp>(test->clone());
1172 newTest.setSymName(test.getSymName().str() + "_" +
1173 target.getSymName().str());
1174 table.insert(newTest, rewriter.getInsertionPoint());
1175
1176 // Copy the target body into the newly created test
1177 IRMapping mapping;
1178 rewriter.setInsertionPointToStart(newTest.getBody());
1179 for (auto &op : target.getBody()->without_terminator())
1180 rewriter.clone(op, mapping);
1181
1182 for (auto [returnVal, result] :
1183 llvm::zip(target.getBody()->getTerminator()->getOperands(),
1184 newTest.getBody()->getArguments()))
1185 result.replaceAllUsesWith(mapping.lookup(returnVal));
1186
1187 newTest.getBody()->eraseArguments(0,
1188 newTest.getBody()->getNumArguments());
1189 newTest.setTarget(DictType::get(&getContext(), {}));
1190 }
1191
1192 target->erase();
1193 }
1194
1195 // Erase all remaining non-matched tests.
1196 for (auto test : llvm::make_early_inc_range(moduleOp.getOps<TestOp>()))
1197 if (!test.getTarget().getEntries().empty())
1198 test->erase();
1199}
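// Example (hypothetical names): given a target '@cpu' and a test '@rand_test'
// whose required target entries match it, a clone named '@rand_test_cpu' is
// created, the body of '@cpu' (minus its terminator) is copied to the start of
// the clone, and the test's block arguments are replaced by the corresponding
// yielded values; the original '@rand_test' still has unfulfilled requirements
// and is erased at the end.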
1200
1201LogicalResult ElaborationPass::elaborateModule(ModuleOp moduleOp,
1202 SymbolTable &table) {
1203 ElaboratorSharedState state(table, seed);
1204
1205 // Update the name cache
1206 state.names.add(moduleOp);
1207
1208 // Initialize the worklist with the test ops since they cannot be placed by
1209 // other ops.
1210 for (auto testOp : moduleOp.getOps<TestOp>()) {
1211 LLVM_DEBUG(llvm::dbgs()
1212 << "\n=== Elaborating test @" << testOp.getSymName() << "\n\n");
1213 Materializer materializer(OpBuilder::atBlockBegin(testOp.getBody()));
1214 Elaborator elaborator(state, materializer);
1215 if (failed(elaborator.elaborate(testOp.getBodyRegion())))
1216 return failure();
1217
1218 materializer.finalize();
1219 }
1220
1221 // Do top-down BFS traversal such that elaborating a sequence further down
1222 // does not fix the outcome for multiple placements.
1223 while (!state.worklist.empty()) {
1224 auto *curr = state.worklist.front();
1225 state.worklist.pop();
1226
1227 if (table.lookup<SequenceOp>(curr->name))
1228 continue;
1229
1230 auto familyOp = table.lookup<SequenceOp>(curr->familyName);
1231 // TODO: don't clone if this is the only remaining reference to this
1232 // sequence
1233 OpBuilder builder(familyOp);
1234 auto seqOp = builder.cloneWithoutRegions(familyOp);
1235 seqOp.getBodyRegion().emplaceBlock();
1236 seqOp.setSymName(curr->name);
1237 table.insert(seqOp);
1238 assert(seqOp.getSymName() == curr->name && "should not have been renamed");
1239
1240 LLVM_DEBUG(llvm::dbgs()
1241 << "\n=== Elaborating sequence family @" << familyOp.getSymName()
1242 << " into @" << seqOp.getSymName() << "\n\n");
1243
1244 Materializer materializer(OpBuilder::atBlockBegin(seqOp.getBody()));
1245 Elaborator elaborator(state, materializer);
1246 if (failed(elaborator.elaborate(familyOp.getBodyRegion(), curr->args)))
1247 return failure();
1248
1249 materializer.finalize();
1250 }
1251
1252 for (auto testOp : moduleOp.getOps<TestOp>()) {
1253 // Inline all sequences and remove the operations that place the sequences.
1254 if (failed(inlineSequences(testOp, table)))
1255 return failure();
1256
1257 // Convert 'rtg.label_unique_decl' to 'rtg.label_decl' by choosing a unique
1258 // name based on the set of names we collected during elaboration.
1259 for (auto labelOp :
1260 llvm::make_early_inc_range(testOp.getOps<LabelUniqueDeclOp>())) {
1261 IRRewriter rewriter(labelOp);
1262 auto newName = state.labelNames.newName(labelOp.getFormatString());
1263 rewriter.replaceOpWithNewOp<LabelDeclOp>(labelOp, newName, ValueRange());
1264 }
1265 }
1266
1267 // Remove all sequences since they are not accessible from the outside and
1268 // are not needed anymore since we fully inlined them.
1269 for (auto seqOp : llvm::make_early_inc_range(moduleOp.getOps<SequenceOp>()))
1270 seqOp->erase();
1271
1272 return success();
1273}
1274
1275LogicalResult ElaborationPass::inlineSequences(TestOp testOp,
1276 SymbolTable &table) {
1277 OpBuilder builder(testOp);
1278 for (auto iter = testOp.getBody()->begin();
1279 iter != testOp.getBody()->end();) {
1280 auto invokeOp = dyn_cast<InvokeSequenceOp>(&*iter);
1281 if (!invokeOp) {
1282 ++iter;
1283 continue;
1284 }
1285
1286 auto seqClosureOp =
1287 invokeOp.getSequence().getDefiningOp<SequenceClosureOp>();
1288 if (!seqClosureOp)
1289 return invokeOp->emitError(
1290 "sequence operand not directly defined by sequence_closure op");
1291
1292 auto seqOp = table.lookup<SequenceOp>(seqClosureOp.getSequenceAttr());
1293
1294 builder.setInsertionPointAfter(invokeOp);
1295 IRMapping mapping;
1296 for (auto &op : *seqOp.getBody())
1297 builder.clone(op, mapping);
1298
1299 (iter++)->erase();
1300
1301 if (seqClosureOp->use_empty())
1302 seqClosureOp->erase();
1303 }
1304
1305 return success();
1306}
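// Example: an InvokeSequenceOp whose operand is produced by a
// SequenceClosureOp referencing the already elaborated sequence is replaced by
// a clone of that sequence's body right after the invoke; the invoke is then
// erased, and the closure op is erased as well once it has no remaining uses.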