Diffstat (limited to 'backends')
-rw-r--r--  backends/cxxrtl/cxxrtl.h            127
-rw-r--r--  backends/cxxrtl/cxxrtl_backend.cc  1320
-rw-r--r--  backends/cxxrtl/cxxrtl_capi.cc       23
-rw-r--r--  backends/cxxrtl/cxxrtl_capi.h        56
-rw-r--r--  backends/cxxrtl/cxxrtl_vcd.h         36
-rw-r--r--  backends/spice/spice.cc              22
6 files changed, 1077 insertions, 507 deletions
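One of the changes in the diff below introduces CXXRTL_ASSERT, which checks RTL contract violations and defaults to assert(), but may be overridden by the embedding application (or disabled with -DCXXRTL_NDEBUG). A minimal sketch of such an override, assuming a hypothetical host process that wants to log violations instead of aborting; the handler shown here is illustrative and not part of CXXRTL:

// Sketch of an embedder-side override (hypothetical): define CXXRTL_ASSERT
// before cxxrtl.h is included, so that RTL contract violations (e.g. the
// out-of-bounds memory accesses guarded below) are reported without
// terminating the host process.
#include <cstdio>

#define CXXRTL_ASSERT(expr) \
	do { \
		if (!(expr)) \
			std::fprintf(stderr, "RTL contract violated: %s\n", #expr); \
	} while (0)

#include <backends/cxxrtl/cxxrtl.h> // the generated simulation code includes this header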
diff --git a/backends/cxxrtl/cxxrtl.h b/backends/cxxrtl/cxxrtl.h index 41089a153..0e55c46c2 100644 --- a/backends/cxxrtl/cxxrtl.h +++ b/backends/cxxrtl/cxxrtl.h @@ -36,22 +36,48 @@ #include <map> #include <algorithm> #include <memory> +#include <functional> #include <sstream> #include <backends/cxxrtl/cxxrtl_capi.h> +#ifndef __has_attribute +# define __has_attribute(x) 0 +#endif + // CXXRTL essentially uses the C++ compiler as a hygienic macro engine that feeds an instruction selector. // It generates a lot of specialized template functions with relatively large bodies that, when inlined // into the caller and (for those with loops) unrolled, often expose many new optimization opportunities. // Because of this, most of the CXXRTL runtime must be always inlined for best performance. -#ifndef __has_attribute -# define __has_attribute(x) 0 -#endif #if __has_attribute(always_inline) #define CXXRTL_ALWAYS_INLINE inline __attribute__((__always_inline__)) #else #define CXXRTL_ALWAYS_INLINE inline #endif +// Conversely, some functions in the generated code are extremely large yet very cold, with both of these +// properties being extreme enough to confuse C++ compilers into spending pathological amounts of time +// on a futile (the code becomes worse) attempt to optimize the least important parts of code. +#if __has_attribute(optnone) +#define CXXRTL_EXTREMELY_COLD __attribute__((__optnone__)) +#elif __has_attribute(optimize) +#define CXXRTL_EXTREMELY_COLD __attribute__((__optimize__(0))) +#else +#define CXXRTL_EXTREMELY_COLD +#endif + +// CXXRTL uses assert() to check for C++ contract violations (which may result in e.g. undefined behavior +// of the simulation code itself), and CXXRTL_ASSERT to check for RTL contract violations (which may at +// most result in undefined simulation results). +// +// Though by default, CXXRTL_ASSERT() expands to assert(), it may be overridden e.g. when integrating +// the simulation into another process that should survive violating RTL contracts. +#ifndef CXXRTL_ASSERT +#ifndef CXXRTL_NDEBUG +#define CXXRTL_ASSERT(x) assert(x) +#else +#define CXXRTL_ASSERT(x) +#endif +#endif namespace cxxrtl { @@ -96,9 +122,11 @@ struct value : public expr_base<value<Bits>> { explicit constexpr value(Init ...init) : data{init...} {} value(const value<Bits> &) = default; - value(value<Bits> &&) = default; value<Bits> &operator=(const value<Bits> &) = default; + value(value<Bits> &&) = default; + value<Bits> &operator=(value<Bits> &&) = default; + // A (no-op) helper that forces the cast to value<>. CXXRTL_ALWAYS_INLINE const value<Bits> &val() const { @@ -289,6 +317,14 @@ struct value : public expr_base<value<Bits>> { return sext_cast<NewBits>()(*this); } + // Bit replication is far more efficient than the equivalent concatenation. + template<size_t Count> + CXXRTL_ALWAYS_INLINE + value<Bits * Count> repeat() const { + static_assert(Bits == 1, "repeat() is implemented only for 1-bit values"); + return *this ? value<Bits * Count>().bit_not() : value<Bits * Count>(); + } + // Operations with run-time parameters (offsets, amounts, etc). // // These operations are used for computations. @@ -643,14 +679,20 @@ struct wire { value<Bits> next; wire() = default; - constexpr wire(const value<Bits> &init) : curr(init), next(init) {} + explicit constexpr wire(const value<Bits> &init) : curr(init), next(init) {} template<typename... Init> explicit constexpr wire(Init ...init) : curr{init...}, next{init...} {} + // Copying and copy-assigning values is natural. 
If, however, a value is replaced with a wire, + // e.g. because a module is built with a different optimization level, then existing code could + // unintentionally copy a wire instead, which would create a subtle but serious bug. To make sure + // this doesn't happen, prohibit copying and copy-assigning wires. wire(const wire<Bits> &) = delete; - wire(wire<Bits> &&) = default; wire<Bits> &operator=(const wire<Bits> &) = delete; + wire(wire<Bits> &&) = default; + wire<Bits> &operator=(wire<Bits> &&) = default; + template<class IntegerT> CXXRTL_ALWAYS_INLINE IntegerT get() const { @@ -692,6 +734,9 @@ struct memory { memory(const memory<Width> &) = delete; memory<Width> &operator=(const memory<Width> &) = delete; + memory(memory<Width> &&) = default; + memory<Width> &operator=(memory<Width> &&) = default; + // The only way to get the compiler to put the initializer in .rodata and do not copy it on stack is to stuff it // into a plain array. You'd think an std::initializer_list would work here, but it doesn't, because you can't // construct an initializer_list in a constexpr (or something) and so if you try to do that the whole thing is @@ -815,9 +860,12 @@ struct metadata { typedef std::map<std::string, metadata> metadata_map; -// Helper class to disambiguate values/wires and their aliases. +// Tag class to disambiguate values/wires and their aliases. struct debug_alias {}; +// Tag declaration to disambiguate values and debug outlines. +using debug_outline = ::_cxxrtl_outline; + // This structure is intended for consumption via foreign function interfaces, like Python's ctypes. // Because of this it uses a C-style layout that is easy to parse rather than more idiomatic C++. // @@ -826,10 +874,11 @@ struct debug_alias {}; struct debug_item : ::cxxrtl_object { // Object types. enum : uint32_t { - VALUE = CXXRTL_VALUE, - WIRE = CXXRTL_WIRE, - MEMORY = CXXRTL_MEMORY, - ALIAS = CXXRTL_ALIAS, + VALUE = CXXRTL_VALUE, + WIRE = CXXRTL_WIRE, + MEMORY = CXXRTL_MEMORY, + ALIAS = CXXRTL_ALIAS, + OUTLINE = CXXRTL_OUTLINE, }; // Object flags. @@ -856,6 +905,7 @@ struct debug_item : ::cxxrtl_object { zero_at = 0; curr = item.data; next = item.data; + outline = nullptr; } template<size_t Bits> @@ -870,6 +920,7 @@ struct debug_item : ::cxxrtl_object { zero_at = 0; curr = const_cast<chunk_t*>(item.data); next = nullptr; + outline = nullptr; } template<size_t Bits> @@ -885,6 +936,7 @@ struct debug_item : ::cxxrtl_object { zero_at = 0; curr = item.curr.data; next = item.next.data; + outline = nullptr; } template<size_t Width> @@ -899,6 +951,7 @@ struct debug_item : ::cxxrtl_object { zero_at = zero_offset; curr = item.data.empty() ? 
nullptr : item.data[0].data; next = nullptr; + outline = nullptr; } template<size_t Bits> @@ -913,6 +966,7 @@ struct debug_item : ::cxxrtl_object { zero_at = 0; curr = const_cast<chunk_t*>(item.data); next = nullptr; + outline = nullptr; } template<size_t Bits> @@ -928,6 +982,22 @@ struct debug_item : ::cxxrtl_object { zero_at = 0; curr = const_cast<chunk_t*>(item.curr.data); next = nullptr; + outline = nullptr; + } + + template<size_t Bits> + debug_item(debug_outline &group, const value<Bits> &item, size_t lsb_offset = 0) { + static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t), + "value<Bits> is not compatible with C layout"); + type = OUTLINE; + flags = DRIVEN_COMB; + width = Bits; + lsb_at = lsb_offset; + depth = 1; + zero_at = 0; + curr = const_cast<chunk_t*>(item.data); + next = nullptr; + outline = &group; } }; static_assert(std::is_standard_layout<debug_item>::value, "debug_item is not compatible with C layout"); @@ -965,13 +1035,25 @@ struct debug_items { } }; +// Tag class to disambiguate module move constructor and module constructor that takes black boxes +// out of another instance of the module. +struct adopt {}; + struct module { module() {} virtual ~module() {} + // Modules with black boxes cannot be copied. Although not all designs include black boxes, + // delete the copy constructor and copy assignment operator to make sure that any downstream + // code that manipulates modules doesn't accidentally depend on their availability. module(const module &) = delete; module &operator=(const module &) = delete; + module(module &&) = default; + module &operator=(module &&) = default; + + virtual void reset() = 0; + virtual bool eval() = 0; virtual bool commit() = 0; @@ -992,11 +1074,16 @@ struct module { } // namespace cxxrtl -// Internal structure used to communicate with the implementation of the C interface. +// Internal structures used to communicate with the implementation of the C interface. + typedef struct _cxxrtl_toplevel { std::unique_ptr<cxxrtl::module> module; } *cxxrtl_toplevel; +typedef struct _cxxrtl_outline { + std::function<void()> eval; +} *cxxrtl_outline; + // Definitions of internal Yosys cells. Other than the functions in this namespace, CXXRTL is fully generic // and indepenent of Yosys implementation details. 
// @@ -1130,49 +1217,49 @@ value<BitsY> xnor_ss(const value<BitsA> &a, const value<BitsB> &b) { template<size_t BitsY, size_t BitsA, size_t BitsB> CXXRTL_ALWAYS_INLINE value<BitsY> shl_uu(const value<BitsA> &a, const value<BitsB> &b) { - return a.template zcast<BitsY>().template shl(b); + return a.template zcast<BitsY>().shl(b); } template<size_t BitsY, size_t BitsA, size_t BitsB> CXXRTL_ALWAYS_INLINE value<BitsY> shl_su(const value<BitsA> &a, const value<BitsB> &b) { - return a.template scast<BitsY>().template shl(b); + return a.template scast<BitsY>().shl(b); } template<size_t BitsY, size_t BitsA, size_t BitsB> CXXRTL_ALWAYS_INLINE value<BitsY> sshl_uu(const value<BitsA> &a, const value<BitsB> &b) { - return a.template zcast<BitsY>().template shl(b); + return a.template zcast<BitsY>().shl(b); } template<size_t BitsY, size_t BitsA, size_t BitsB> CXXRTL_ALWAYS_INLINE value<BitsY> sshl_su(const value<BitsA> &a, const value<BitsB> &b) { - return a.template scast<BitsY>().template shl(b); + return a.template scast<BitsY>().shl(b); } template<size_t BitsY, size_t BitsA, size_t BitsB> CXXRTL_ALWAYS_INLINE value<BitsY> shr_uu(const value<BitsA> &a, const value<BitsB> &b) { - return a.template shr(b).template zcast<BitsY>(); + return a.shr(b).template zcast<BitsY>(); } template<size_t BitsY, size_t BitsA, size_t BitsB> CXXRTL_ALWAYS_INLINE value<BitsY> shr_su(const value<BitsA> &a, const value<BitsB> &b) { - return a.template shr(b).template scast<BitsY>(); + return a.shr(b).template scast<BitsY>(); } template<size_t BitsY, size_t BitsA, size_t BitsB> CXXRTL_ALWAYS_INLINE value<BitsY> sshr_uu(const value<BitsA> &a, const value<BitsB> &b) { - return a.template shr(b).template zcast<BitsY>(); + return a.shr(b).template zcast<BitsY>(); } template<size_t BitsY, size_t BitsA, size_t BitsB> CXXRTL_ALWAYS_INLINE value<BitsY> sshr_su(const value<BitsA> &a, const value<BitsB> &b) { - return a.template sshr(b).template scast<BitsY>(); + return a.sshr(b).template scast<BitsY>(); } template<size_t BitsY, size_t BitsA, size_t BitsB> diff --git a/backends/cxxrtl/cxxrtl_backend.cc b/backends/cxxrtl/cxxrtl_backend.cc index a48ea5b23..39046bd78 100644 --- a/backends/cxxrtl/cxxrtl_backend.cc +++ b/backends/cxxrtl/cxxrtl_backend.cc @@ -195,7 +195,7 @@ bool is_extending_cell(RTLIL::IdString type) ID($reduce_and), ID($reduce_or), ID($reduce_xor), ID($reduce_xnor), ID($reduce_bool)); } -bool is_elidable_cell(RTLIL::IdString type) +bool is_inlinable_cell(RTLIL::IdString type) { return is_unary_cell(type) || is_binary_cell(type) || type.in( ID($mux), ID($concat), ID($slice), ID($pmux)); @@ -211,7 +211,12 @@ bool is_ff_cell(RTLIL::IdString type) bool is_internal_cell(RTLIL::IdString type) { - return type[0] == '$' && !type.begins_with("$paramod"); + return !type.isPublic() && !type.begins_with("$paramod"); +} + +bool is_effectful_cell(RTLIL::IdString type) +{ + return type == ID($memwr) || type.isPublic(); } bool is_cxxrtl_blackbox_cell(const RTLIL::Cell *cell) @@ -227,18 +232,15 @@ enum class CxxrtlPortType { SYNC = 2, }; -CxxrtlPortType cxxrtl_port_type(const RTLIL::Cell *cell, RTLIL::IdString port) +CxxrtlPortType cxxrtl_port_type(RTLIL::Module *module, RTLIL::IdString port) { - RTLIL::Module *cell_module = cell->module->design->module(cell->type); - if (cell_module == nullptr || !cell_module->get_bool_attribute(ID(cxxrtl_blackbox))) - return CxxrtlPortType::UNKNOWN; - RTLIL::Wire *cell_output_wire = cell_module->wire(port); - log_assert(cell_output_wire != nullptr); - bool is_comb = 
cell_output_wire->get_bool_attribute(ID(cxxrtl_comb)); - bool is_sync = cell_output_wire->get_bool_attribute(ID(cxxrtl_sync)); + RTLIL::Wire *output_wire = module->wire(port); + log_assert(output_wire != nullptr); + bool is_comb = output_wire->get_bool_attribute(ID(cxxrtl_comb)); + bool is_sync = output_wire->get_bool_attribute(ID(cxxrtl_sync)); if (is_comb && is_sync) log_cmd_error("Port `%s.%s' is marked as both `cxxrtl_comb` and `cxxrtl_sync`.\n", - log_id(cell_module), log_signal(cell_output_wire)); + log_id(module), log_signal(output_wire)); else if (is_comb) return CxxrtlPortType::COMB; else if (is_sync) @@ -246,6 +248,14 @@ CxxrtlPortType cxxrtl_port_type(const RTLIL::Cell *cell, RTLIL::IdString port) return CxxrtlPortType::UNKNOWN; } +CxxrtlPortType cxxrtl_port_type(const RTLIL::Cell *cell, RTLIL::IdString port) +{ + RTLIL::Module *cell_module = cell->module->design->module(cell->type); + if (cell_module == nullptr || !cell_module->get_bool_attribute(ID(cxxrtl_blackbox))) + return CxxrtlPortType::UNKNOWN; + return cxxrtl_port_type(cell_module, port); +} + bool is_cxxrtl_comb_port(const RTLIL::Cell *cell, RTLIL::IdString port) { return cxxrtl_port_type(cell, port) == CxxrtlPortType::COMB; @@ -262,7 +272,8 @@ struct FlowGraph { CONNECT, CELL_SYNC, CELL_EVAL, - PROCESS + PROCESS_SYNC, + PROCESS_CASE, }; Type type; @@ -273,7 +284,9 @@ struct FlowGraph { std::vector<Node*> nodes; dict<const RTLIL::Wire*, pool<Node*, hash_ptr_ops>> wire_comb_defs, wire_sync_defs, wire_uses; - dict<const RTLIL::Wire*, bool> wire_def_elidable, wire_use_elidable; + dict<Node*, pool<const RTLIL::Wire*>, hash_ptr_ops> node_comb_defs, node_sync_defs, node_uses; + dict<const RTLIL::Wire*, bool> wire_def_inlinable; + dict<const RTLIL::Wire*, dict<Node*, bool, hash_ptr_ops>> wire_use_inlinable; dict<RTLIL::SigBit, bool> bit_has_state; ~FlowGraph() @@ -282,7 +295,7 @@ struct FlowGraph { delete node; } - void add_defs(Node *node, const RTLIL::SigSpec &sig, bool is_ff, bool elidable) + void add_defs(Node *node, const RTLIL::SigSpec &sig, bool is_ff, bool inlinable) { for (auto chunk : sig.chunks()) if (chunk.wire) { @@ -290,17 +303,19 @@ struct FlowGraph { // A sync def means that a wire holds design state because it is driven directly by // a flip-flop output. Such a wire can never be unbuffered. wire_sync_defs[chunk.wire].insert(node); + node_sync_defs[node].insert(chunk.wire); } else { // A comb def means that a wire doesn't hold design state. It might still be connected, // indirectly, to a flip-flop output. wire_comb_defs[chunk.wire].insert(node); + node_comb_defs[node].insert(chunk.wire); } } for (auto bit : sig.bits()) bit_has_state[bit] |= is_ff; - // Only comb defs of an entire wire in the right order can be elided. + // Only comb defs of an entire wire in the right order can be inlined. if (!is_ff && sig.is_wire()) - wire_def_elidable[sig.as_wire()] = elidable; + wire_def_inlinable[sig.as_wire()] = inlinable; } void add_uses(Node *node, const RTLIL::SigSpec &sig) @@ -308,26 +323,41 @@ struct FlowGraph { for (auto chunk : sig.chunks()) if (chunk.wire) { wire_uses[chunk.wire].insert(node); - // Only a single use of an entire wire in the right order can be elided. - // (But the use can include other chunks.) - if (!wire_use_elidable.count(chunk.wire)) - wire_use_elidable[chunk.wire] = true; + node_uses[node].insert(chunk.wire); + // Only a single use of an entire wire in the right order can be inlined. (But the use can include + // other chunks.) 
This is tracked per-node because a wire used by multiple nodes can still be inlined + // if all but one of those nodes is dead. + if (!wire_use_inlinable[chunk.wire].count(node)) + wire_use_inlinable[chunk.wire][node] = true; else - wire_use_elidable[chunk.wire] = false; + wire_use_inlinable[chunk.wire][node] = false; } } - bool is_elidable(const RTLIL::Wire *wire) const + bool is_inlinable(const RTLIL::Wire *wire) const { - if (wire_def_elidable.count(wire) && wire_use_elidable.count(wire)) - return wire_def_elidable.at(wire) && wire_use_elidable.at(wire); + // Can the wire be inlined at all? + if (wire_def_inlinable.count(wire)) + return wire_def_inlinable.at(wire); + return false; + } + + bool is_inlinable(const RTLIL::Wire *wire, const pool<Node*, hash_ptr_ops> &nodes) const + { + // Can the wire be inlined, knowing that the given nodes are reachable? + if (nodes.size() != 1) + return false; + Node *node = *nodes.begin(); + log_assert(node_uses.at(node).count(wire)); + if (is_inlinable(wire) && wire_use_inlinable.count(wire) && wire_use_inlinable.at(wire).count(node)) + return wire_use_inlinable.at(wire).at(node); return false; } // Connections void add_connect_defs_uses(Node *node, const RTLIL::SigSig &conn) { - add_defs(node, conn.first, /*is_ff=*/false, /*elidable=*/true); + add_defs(node, conn.first, /*is_ff=*/false, /*inlinable=*/true); add_uses(node, conn.second); } @@ -373,8 +403,8 @@ struct FlowGraph { for (auto conn : cell->connections()) if (cell->output(conn.first)) if (is_cxxrtl_sync_port(cell, conn.first)) { - // See note regarding elidability below. - add_defs(node, conn.second, /*is_ff=*/false, /*elidable=*/false); + // See note regarding inlinability below. + add_defs(node, conn.second, /*is_ff=*/false, /*inlinable=*/false); } } @@ -382,19 +412,19 @@ struct FlowGraph { { for (auto conn : cell->connections()) { if (cell->output(conn.first)) { - if (is_elidable_cell(cell->type)) - add_defs(node, conn.second, /*is_ff=*/false, /*elidable=*/true); + if (is_inlinable_cell(cell->type)) + add_defs(node, conn.second, /*is_ff=*/false, /*inlinable=*/true); else if (is_ff_cell(cell->type) || (cell->type == ID($memrd) && cell->getParam(ID::CLK_ENABLE).as_bool())) - add_defs(node, conn.second, /*is_ff=*/true, /*elidable=*/false); + add_defs(node, conn.second, /*is_ff=*/true, /*inlinable=*/false); else if (is_internal_cell(cell->type)) - add_defs(node, conn.second, /*is_ff=*/false, /*elidable=*/false); + add_defs(node, conn.second, /*is_ff=*/false, /*inlinable=*/false); else if (!is_cxxrtl_sync_port(cell, conn.first)) { - // Although at first it looks like outputs of user-defined cells may always be elided, the reality is - // more complex. Fully sync outputs produce no defs and so don't participate in elision. Fully comb + // Although at first it looks like outputs of user-defined cells may always be inlined, the reality is + // more complex. Fully sync outputs produce no defs and so don't participate in inlining. Fully comb // outputs are assigned in a different way depending on whether the cell's eval() immediately converged. - // Unknown/mixed outputs could be elided, but should be rare in practical designs and don't justify - // the infrastructure required to elide outputs of cells with many of them. - add_defs(node, conn.second, /*is_ff=*/false, /*elidable=*/false); + // Unknown/mixed outputs could be inlined, but should be rare in practical designs and don't justify + // the infrastructure required to inline outputs of cells with many of them. 
+ add_defs(node, conn.second, /*is_ff=*/false, /*inlinable=*/false); } } if (cell->input(conn.first)) @@ -429,10 +459,10 @@ struct FlowGraph { } // Processes - void add_case_defs_uses(Node *node, const RTLIL::CaseRule *case_) + void add_case_rule_defs_uses(Node *node, const RTLIL::CaseRule *case_) { for (auto &action : case_->actions) { - add_defs(node, action.first, /*is_ff=*/false, /*elidable=*/false); + add_defs(node, action.first, /*is_ff=*/false, /*inlinable=*/false); add_uses(node, action.second); } for (auto sub_switch : case_->switches) { @@ -440,20 +470,19 @@ struct FlowGraph { for (auto sub_case : sub_switch->cases) { for (auto &compare : sub_case->compare) add_uses(node, compare); - add_case_defs_uses(node, sub_case); + add_case_rule_defs_uses(node, sub_case); } } } - void add_process_defs_uses(Node *node, const RTLIL::Process *process) + void add_sync_rules_defs_uses(Node *node, const RTLIL::Process *process) { - add_case_defs_uses(node, &process->root_case); for (auto sync : process->syncs) for (auto action : sync->actions) { if (sync->type == RTLIL::STp || sync->type == RTLIL::STn || sync->type == RTLIL::STe) - add_defs(node, action.first, /*is_ff=*/true, /*elidable=*/false); + add_defs(node, action.first, /*is_ff=*/true, /*inlinable=*/false); else - add_defs(node, action.first, /*is_ff=*/false, /*elidable=*/false); + add_defs(node, action.first, /*is_ff=*/false, /*inlinable=*/false); add_uses(node, action.second); } } @@ -461,10 +490,16 @@ struct FlowGraph { Node *add_node(const RTLIL::Process *process) { Node *node = new Node; - node->type = Node::Type::PROCESS; + node->type = Node::Type::PROCESS_SYNC; node->process = process; nodes.push_back(node); - add_process_defs_uses(node, process); + add_sync_rules_defs_uses(node, process); + + node = new Node; + node->type = Node::Type::PROCESS_CASE; + node->process = process; + nodes.push_back(node); + add_case_rule_defs_uses(node, &process->root_case); return node; } }; @@ -520,6 +555,58 @@ std::string get_hdl_name(T *object) return object->name.str().substr(1); } +struct WireType { + enum Type { + // Non-referenced wire; is not a part of the design. + UNUSED, + // Double-buffered wire; is a class member, and holds design state. + BUFFERED, + // Single-buffered wire; is a class member, but holds no state. + MEMBER, + // Single-buffered wire; is a class member, and is computed on demand. + OUTLINE, + // Local wire; is a local variable in eval method. + LOCAL, + // Inline wire; is an unnamed temporary in eval method. + INLINE, + // Alias wire; is replaced with aliasee, except in debug info. + ALIAS, + // Const wire; is replaced with constant, except in debug info. 
+ CONST, + }; + + Type type = UNUSED; + const RTLIL::Cell *cell_subst = nullptr; // for INLINE + RTLIL::SigSpec sig_subst = {}; // for INLINE, ALIAS, and CONST + + WireType() = default; + + WireType(Type type) : type(type) { + log_assert(type == UNUSED || type == BUFFERED || type == MEMBER || type == OUTLINE || type == LOCAL); + } + + WireType(Type type, const RTLIL::Cell *cell) : type(type), cell_subst(cell) { + log_assert(type == INLINE && is_inlinable_cell(cell->type)); + } + + WireType(Type type, RTLIL::SigSpec sig) : type(type), sig_subst(sig) { + log_assert(type == INLINE || (type == ALIAS && sig.is_wire()) || (type == CONST && sig.is_fully_const())); + } + + bool is_buffered() const { return type == BUFFERED; } + bool is_member() const { return type == BUFFERED || type == MEMBER || type == OUTLINE; } + bool is_outline() const { return type == OUTLINE; } + bool is_named() const { return is_member() || type == LOCAL; } + bool is_local() const { return type == LOCAL || type == INLINE; } + bool is_exact() const { return type == ALIAS || type == CONST; } +}; + +// Tests for a SigSpec that is a valid clock input, clocks have to have a backing wire and be a single bit +// using this instead of sig.is_wire() solves issues when the clock is a slice instead of a full wire +bool is_valid_clock(const RTLIL::SigSpec& sig) { + return sig.is_chunk() && sig.is_bit() && sig[0].wire; +} + struct CxxrtlWorker { bool split_intf = false; std::string intf_filename; @@ -535,10 +622,13 @@ struct CxxrtlWorker { bool unbuffer_public = false; bool localize_internal = false; bool localize_public = false; - bool elide_internal = false; - bool elide_public = false; + bool inline_internal = false; + bool inline_public = false; bool debug_info = false; + bool debug_member = false; + bool debug_alias = false; + bool debug_eval = false; std::ostringstream f; std::string indent; @@ -549,12 +639,8 @@ struct CxxrtlWorker { dict<RTLIL::SigBit, RTLIL::SyncType> edge_types; pool<const RTLIL::Memory*> writable_memories; dict<const RTLIL::Cell*, pool<const RTLIL::Cell*>> transparent_for; - dict<const RTLIL::Wire*, FlowGraph::Node> elided_wires; - dict<const RTLIL::Module*, std::vector<FlowGraph::Node>> schedule; - pool<const RTLIL::Wire*> unbuffered_wires; - pool<const RTLIL::Wire*> localized_wires; - dict<const RTLIL::Wire*, const RTLIL::Wire*> debug_alias_wires; - dict<const RTLIL::Wire*, RTLIL::Const> debug_const_wires; + dict<const RTLIL::Module*, std::vector<FlowGraph::Node>> schedule, debug_schedule; + dict<const RTLIL::Wire*, WireType> wire_types, debug_wire_types; dict<RTLIL::SigBit, bool> bit_has_state; dict<const RTLIL::Module*, pool<std::string>> blackbox_specializations; dict<const RTLIL::Module*, bool> eval_converges; @@ -786,30 +872,37 @@ struct CxxrtlWorker { dump_const(data, data.size()); } - bool dump_sigchunk(const RTLIL::SigChunk &chunk, bool is_lhs) + bool dump_sigchunk(const RTLIL::SigChunk &chunk, bool is_lhs, bool for_debug = false) { if (chunk.wire == NULL) { dump_const(chunk.data, chunk.width, chunk.offset); return false; } else { - if (elided_wires.count(chunk.wire)) { - log_assert(!is_lhs); - const FlowGraph::Node &node = elided_wires[chunk.wire]; - switch (node.type) { - case FlowGraph::Node::Type::CONNECT: - dump_connect_elided(node.connect); - break; - case FlowGraph::Node::Type::CELL_EVAL: - log_assert(is_elidable_cell(node.cell->type)); - dump_cell_elided(node.cell); + const auto &wire_type = (for_debug ? 
debug_wire_types : wire_types)[chunk.wire]; + switch (wire_type.type) { + case WireType::BUFFERED: + f << mangle(chunk.wire) << (is_lhs ? ".next" : ".curr"); + break; + case WireType::MEMBER: + case WireType::LOCAL: + case WireType::OUTLINE: + f << mangle(chunk.wire); + break; + case WireType::INLINE: + log_assert(!is_lhs); + if (wire_type.cell_subst != nullptr) { + dump_cell_expr(wire_type.cell_subst, for_debug); break; - default: - log_assert(false); - } - } else if (unbuffered_wires[chunk.wire]) { - f << mangle(chunk.wire); - } else { - f << mangle(chunk.wire) << (is_lhs ? ".next" : ".curr"); + } + YS_FALLTHROUGH + case WireType::ALIAS: + case WireType::CONST: + log_assert(!is_lhs); + return dump_sigspec(wire_type.sig_subst.extract(chunk.offset, chunk.width), is_lhs, for_debug); + case WireType::UNUSED: + log_assert(is_lhs); + f << "value<" << chunk.width << ">()"; + return false; } if (chunk.width == chunk.wire->width && chunk.offset == 0) return false; @@ -821,92 +914,116 @@ struct CxxrtlWorker { } } - bool dump_sigspec(const RTLIL::SigSpec &sig, bool is_lhs) + bool dump_sigspec(const RTLIL::SigSpec &sig, bool is_lhs, bool for_debug = false) { if (sig.empty()) { f << "value<0>()"; return false; } else if (sig.is_chunk()) { - return dump_sigchunk(sig.as_chunk(), is_lhs); + return dump_sigchunk(sig.as_chunk(), is_lhs, for_debug); } else { - dump_sigchunk(*sig.chunks().rbegin(), is_lhs); - for (auto it = sig.chunks().rbegin() + 1; it != sig.chunks().rend(); ++it) { - f << ".concat("; - dump_sigchunk(*it, is_lhs); - f << ")"; + bool first = true; + auto chunks = sig.chunks(); + for (auto it = chunks.rbegin(); it != chunks.rend(); it++) { + if (!first) + f << ".concat("; + bool is_complex = dump_sigchunk(*it, is_lhs, for_debug); + if (!is_lhs && it->width == 1) { + size_t repeat = 1; + while ((it + repeat) != chunks.rend() && *(it + repeat) == *it) + repeat++; + if (repeat > 1) { + if (is_complex) + f << ".val()"; + f << ".repeat<" << repeat << ">()"; + } + it += repeat - 1; + } + if (!first) + f << ")"; + first = false; } return true; } } - void dump_sigspec_lhs(const RTLIL::SigSpec &sig) + void dump_sigspec_lhs(const RTLIL::SigSpec &sig, bool for_debug = false) { - dump_sigspec(sig, /*is_lhs=*/true); + dump_sigspec(sig, /*is_lhs=*/true, for_debug); } - void dump_sigspec_rhs(const RTLIL::SigSpec &sig) + void dump_sigspec_rhs(const RTLIL::SigSpec &sig, bool for_debug = false) { // In the contexts where we want template argument deduction to occur for `template<size_t Bits> ... value<Bits>`, // it is necessary to have the argument to already be a `value<N>`, since template argument deduction and implicit // type conversion are mutually exclusive. In these contexts, we use dump_sigspec_rhs() to emit an explicit // type conversion, but only if the expression needs it. 
- bool is_complex = dump_sigspec(sig, /*is_lhs=*/false); + bool is_complex = dump_sigspec(sig, /*is_lhs=*/false, for_debug); if (is_complex) f << ".val()"; } - void collect_sigspec_rhs(const RTLIL::SigSpec &sig, std::vector<RTLIL::IdString> &cells) + void dump_inlined_cells(const std::vector<const RTLIL::Cell*> &cells) + { + if (cells.empty()) { + f << indent << "// connection\n"; + } else if (cells.size() == 1) { + dump_attrs(cells.front()); + f << indent << "// cell " << cells.front()->name.str() << "\n"; + } else { + f << indent << "// cells"; + for (auto cell : cells) + f << " " << cell->name.str(); + f << "\n"; + } + } + + void collect_sigspec_rhs(const RTLIL::SigSpec &sig, bool for_debug, std::vector<const RTLIL::Cell*> &cells) { for (auto chunk : sig.chunks()) { - if (!chunk.wire || !elided_wires.count(chunk.wire)) + if (!chunk.wire) continue; - - const FlowGraph::Node &node = elided_wires[chunk.wire]; - switch (node.type) { - case FlowGraph::Node::Type::CONNECT: - collect_connect(node.connect, cells); - break; - case FlowGraph::Node::Type::CELL_EVAL: - collect_cell_eval(node.cell, cells); + const auto &wire_type = wire_types[chunk.wire]; + switch (wire_type.type) { + case WireType::INLINE: + if (wire_type.cell_subst != nullptr) { + collect_cell_eval(wire_type.cell_subst, for_debug, cells); + break; + } + YS_FALLTHROUGH + case WireType::ALIAS: + collect_sigspec_rhs(wire_type.sig_subst, for_debug, cells); break; default: - log_assert(false); + break; } } } - void dump_connect_elided(const RTLIL::SigSig &conn) - { - dump_sigspec_rhs(conn.second); - } - - bool is_connect_elided(const RTLIL::SigSig &conn) + void dump_connect_expr(const RTLIL::SigSig &conn, bool for_debug = false) { - return conn.first.is_wire() && elided_wires.count(conn.first.as_wire()); + dump_sigspec_rhs(conn.second, for_debug); } - void collect_connect(const RTLIL::SigSig &conn, std::vector<RTLIL::IdString> &cells) + void dump_connect(const RTLIL::SigSig &conn, bool for_debug = false) { - if (!is_connect_elided(conn)) - return; + std::vector<const RTLIL::Cell*> inlined_cells; + collect_sigspec_rhs(conn.second, for_debug, inlined_cells); + dump_inlined_cells(inlined_cells); - collect_sigspec_rhs(conn.second, cells); - } - - void dump_connect(const RTLIL::SigSig &conn) - { - if (is_connect_elided(conn)) - return; - - f << indent << "// connection\n"; f << indent; - dump_sigspec_lhs(conn.first); + dump_sigspec_lhs(conn.first, for_debug); f << " = "; - dump_connect_elided(conn); + dump_connect_expr(conn, for_debug); f << ";\n"; } - void dump_cell_sync(const RTLIL::Cell *cell) + void collect_connect(const RTLIL::SigSig &conn, bool for_debug, std::vector<const RTLIL::Cell*> &cells) + { + collect_sigspec_rhs(conn.second, for_debug, cells); + } + + void dump_cell_sync(const RTLIL::Cell *cell, bool for_debug = false) { const char *access = is_cxxrtl_blackbox_cell(cell) ? 
"->" : "."; f << indent << "// cell " << cell->name.str() << " syncs\n"; @@ -914,12 +1031,12 @@ struct CxxrtlWorker { if (cell->output(conn.first)) if (is_cxxrtl_sync_port(cell, conn.first)) { f << indent; - dump_sigspec_lhs(conn.second); + dump_sigspec_lhs(conn.second, for_debug); f << " = " << mangle(cell) << access << mangle_wire_name(conn.first) << ".curr;\n"; } } - void dump_cell_elided(const RTLIL::Cell *cell) + void dump_cell_expr(const RTLIL::Cell *cell, bool for_debug = false) { // Unary cells if (is_unary_cell(cell->type)) { @@ -927,7 +1044,7 @@ struct CxxrtlWorker { if (is_extending_cell(cell->type)) f << '_' << (cell->getParam(ID::A_SIGNED).as_bool() ? 's' : 'u'); f << "<" << cell->getParam(ID::Y_WIDTH).as_int() << ">("; - dump_sigspec_rhs(cell->getPort(ID::A)); + dump_sigspec_rhs(cell->getPort(ID::A), for_debug); f << ")"; // Binary cells } else if (is_binary_cell(cell->type)) { @@ -936,18 +1053,18 @@ struct CxxrtlWorker { f << '_' << (cell->getParam(ID::A_SIGNED).as_bool() ? 's' : 'u') << (cell->getParam(ID::B_SIGNED).as_bool() ? 's' : 'u'); f << "<" << cell->getParam(ID::Y_WIDTH).as_int() << ">("; - dump_sigspec_rhs(cell->getPort(ID::A)); + dump_sigspec_rhs(cell->getPort(ID::A), for_debug); f << ", "; - dump_sigspec_rhs(cell->getPort(ID::B)); + dump_sigspec_rhs(cell->getPort(ID::B), for_debug); f << ")"; // Muxes } else if (cell->type == ID($mux)) { f << "("; - dump_sigspec_rhs(cell->getPort(ID::S)); + dump_sigspec_rhs(cell->getPort(ID::S), for_debug); f << " ? "; - dump_sigspec_rhs(cell->getPort(ID::B)); + dump_sigspec_rhs(cell->getPort(ID::B), for_debug); f << " : "; - dump_sigspec_rhs(cell->getPort(ID::A)); + dump_sigspec_rhs(cell->getPort(ID::A), for_debug); f << ")"; // Parallel (one-hot) muxes } else if (cell->type == ID($pmux)) { @@ -955,24 +1072,24 @@ struct CxxrtlWorker { int s_width = cell->getParam(ID::S_WIDTH).as_int(); for (int part = 0; part < s_width; part++) { f << "("; - dump_sigspec_rhs(cell->getPort(ID::S).extract(part)); + dump_sigspec_rhs(cell->getPort(ID::S).extract(part), for_debug); f << " ? 
"; - dump_sigspec_rhs(cell->getPort(ID::B).extract(part * width, width)); + dump_sigspec_rhs(cell->getPort(ID::B).extract(part * width, width), for_debug); f << " : "; } - dump_sigspec_rhs(cell->getPort(ID::A)); + dump_sigspec_rhs(cell->getPort(ID::A), for_debug); for (int part = 0; part < s_width; part++) { f << ")"; } // Concats } else if (cell->type == ID($concat)) { - dump_sigspec_rhs(cell->getPort(ID::B)); + dump_sigspec_rhs(cell->getPort(ID::B), for_debug); f << ".concat("; - dump_sigspec_rhs(cell->getPort(ID::A)); + dump_sigspec_rhs(cell->getPort(ID::A), for_debug); f << ").val()"; // Slices } else if (cell->type == ID($slice)) { - dump_sigspec_rhs(cell->getPort(ID::A)); + dump_sigspec_rhs(cell->getPort(ID::A), for_debug); f << ".slice<"; f << cell->getParam(ID::OFFSET).as_int() + cell->getParam(ID::Y_WIDTH).as_int() - 1; f << ","; @@ -983,61 +1100,33 @@ struct CxxrtlWorker { } } - bool is_cell_elided(const RTLIL::Cell *cell) + void dump_cell_eval(const RTLIL::Cell *cell, bool for_debug = false) { - return is_elidable_cell(cell->type) && cell->hasPort(ID::Y) && cell->getPort(ID::Y).is_wire() && - elided_wires.count(cell->getPort(ID::Y).as_wire()); - } - - void collect_cell_eval(const RTLIL::Cell *cell, std::vector<RTLIL::IdString> &cells) - { - if (!is_cell_elided(cell)) - return; - - cells.push_back(cell->name); - for (auto port : cell->connections()) - if (port.first != ID::Y) - collect_sigspec_rhs(port.second, cells); - } - - void dump_cell_eval(const RTLIL::Cell *cell) - { - if (is_cell_elided(cell)) - return; - if (cell->type == ID($meminit)) - return; // Handled elsewhere. - - std::vector<RTLIL::IdString> elided_cells; - if (is_elidable_cell(cell->type)) { - for (auto port : cell->connections()) - if (port.first != ID::Y) - collect_sigspec_rhs(port.second, elided_cells); - } - if (elided_cells.empty()) { - dump_attrs(cell); - f << indent << "// cell " << cell->name.str() << "\n"; - } else { - f << indent << "// cells"; - for (auto elided_cell : elided_cells) - f << " " << elided_cell.str(); - f << "\n"; - } + std::vector<const RTLIL::Cell*> inlined_cells; + collect_cell_eval(cell, for_debug, inlined_cells); + dump_inlined_cells(inlined_cells); // Elidable cells - if (is_elidable_cell(cell->type)) { + if (is_inlinable_cell(cell->type)) { f << indent; - dump_sigspec_lhs(cell->getPort(ID::Y)); + dump_sigspec_lhs(cell->getPort(ID::Y), for_debug); f << " = "; - dump_cell_elided(cell); + dump_cell_expr(cell, for_debug); f << ";\n"; // Flip-flops } else if (is_ff_cell(cell->type)) { - if (cell->hasPort(ID::CLK) && cell->getPort(ID::CLK).is_wire()) { + log_assert(!for_debug); + // Clocks might be slices of larger signals but should only ever be single bit + if (cell->hasPort(ID::CLK) && is_valid_clock(cell->getPort(ID::CLK))) { // Edge-sensitive logic RTLIL::SigBit clk_bit = cell->getPort(ID::CLK)[0]; clk_bit = sigmaps[clk_bit.wire->module](clk_bit); - f << indent << "if (" << (cell->getParam(ID::CLK_POLARITY).as_bool() ? "posedge_" : "negedge_") - << mangle(clk_bit) << ") {\n"; + if (clk_bit.wire) { + f << indent << "if (" << (cell->getParam(ID::CLK_POLARITY).as_bool() ? 
"posedge_" : "negedge_") + << mangle(clk_bit) << ") {\n"; + } else { + f << indent << "if (false) {\n"; + } inc_indent(); if (cell->hasPort(ID::EN)) { f << indent << "if ("; @@ -1128,10 +1217,15 @@ struct CxxrtlWorker { // Memory ports } else if (cell->type.in(ID($memrd), ID($memwr))) { if (cell->getParam(ID::CLK_ENABLE).as_bool()) { + log_assert(!for_debug); RTLIL::SigBit clk_bit = cell->getPort(ID::CLK)[0]; clk_bit = sigmaps[clk_bit.wire->module](clk_bit); - f << indent << "if (" << (cell->getParam(ID::CLK_POLARITY).as_bool() ? "posedge_" : "negedge_") - << mangle(clk_bit) << ") {\n"; + if (clk_bit.wire) { + f << indent << "if (" << (cell->getParam(ID::CLK_POLARITY).as_bool() ? "posedge_" : "negedge_") + << mangle(clk_bit) << ") {\n"; + } else { + f << indent << "if (false) {\n"; + } inc_indent(); } RTLIL::Memory *memory = cell->module->memories[cell->getParam(ID::MEMID).decode_string()]; @@ -1149,12 +1243,12 @@ struct CxxrtlWorker { } // The generated code has two bounds checks; one in an assertion, and another that guards the read. // This is done so that the code does not invoke undefined behavior under any conditions, but nevertheless - // loudly crashes if an illegal condition is encountered. The assert may be turned off with -DNDEBUG not - // just for release builds, but also to make sure the simulator (which is presumably embedded in some + // loudly crashes if an illegal condition is encountered. The assert may be turned off with -DCXXRTL_NDEBUG + // not only for release builds, but also to make sure the simulator (which is presumably embedded in some // larger program) will never crash the code that calls into it. // // If assertions are disabled, out of bounds reads are defined to return zero. - f << indent << "assert(" << valid_index_temp << ".valid && \"out of bounds read\");\n"; + f << indent << "CXXRTL_ASSERT(" << valid_index_temp << ".valid && \"out of bounds read\");\n"; f << indent << "if(" << valid_index_temp << ".valid) {\n"; inc_indent(); if (writable_memories[memory]) { @@ -1211,7 +1305,7 @@ struct CxxrtlWorker { // See above for rationale of having both the assert and the condition. // // If assertions are disabled, out of bounds writes are defined to do nothing. - f << indent << "assert(" << valid_index_temp << ".valid && \"out of bounds write\");\n"; + f << indent << "CXXRTL_ASSERT(" << valid_index_temp << ".valid && \"out of bounds write\");\n"; f << indent << "if (" << valid_index_temp << ".valid) {\n"; inc_indent(); f << indent << mangle(memory) << ".update(" << valid_index_temp << ".index, "; @@ -1231,11 +1325,21 @@ struct CxxrtlWorker { log_cmd_error("Unsupported internal cell `%s'.\n", cell->type.c_str()); // User cells } else { + log_assert(!for_debug); log_assert(cell->known()); + bool buffered_inputs = false; const char *access = is_cxxrtl_blackbox_cell(cell) ? 
"->" : "."; for (auto conn : cell->connections()) - if (cell->input(conn.first) && !cell->output(conn.first)) { - f << indent << mangle(cell) << access << mangle_wire_name(conn.first) << " = "; + if (cell->input(conn.first)) { + RTLIL::Module *cell_module = cell->module->design->module(cell->type); + log_assert(cell_module != nullptr && cell_module->wire(conn.first) && conn.second.is_wire()); + RTLIL::Wire *cell_module_wire = cell_module->wire(conn.first); + f << indent << mangle(cell) << access << mangle_wire_name(conn.first); + if (!is_cxxrtl_blackbox_cell(cell) && wire_types[cell_module_wire].is_buffered()) { + buffered_inputs = true; + f << ".next"; + } + f << " = "; dump_sigspec_rhs(conn.second); f << ";\n"; if (getenv("CXXRTL_VOID_MY_WARRANTY")) { @@ -1247,19 +1351,11 @@ struct CxxrtlWorker { // with: // top.prev_p_clk = value<1>{0u}; top.p_clk = value<1>{1u}; top.step(); // Don't rely on this; it will be removed without warning. - RTLIL::Module *cell_module = cell->module->design->module(cell->type); - if (cell_module != nullptr && cell_module->wire(conn.first) && conn.second.is_wire()) { - RTLIL::Wire *cell_module_wire = cell_module->wire(conn.first); - if (edge_wires[conn.second.as_wire()] && edge_wires[cell_module_wire]) { - f << indent << mangle(cell) << access << "prev_" << mangle(cell_module_wire) << " = "; - f << "prev_" << mangle(conn.second.as_wire()) << ";\n"; - } + if (edge_wires[conn.second.as_wire()] && edge_wires[cell_module_wire]) { + f << indent << mangle(cell) << access << "prev_" << mangle(cell_module_wire) << " = "; + f << "prev_" << mangle(conn.second.as_wire()) << ";\n"; } } - } else if (cell->input(conn.first)) { - f << indent << mangle(cell) << access << mangle_wire_name(conn.first) << ".next = "; - dump_sigspec_rhs(conn.second); - f << ";\n"; } auto assign_from_outputs = [&](bool cell_converged) { for (auto conn : cell->connections()) { @@ -1277,9 +1373,9 @@ struct CxxrtlWorker { // have any buffered wires if they were not output ports. Imagine inlining the cell's eval() function, // and consider the fate of the localized wires that used to be output ports.) // - // Unlike cell inputs (which are never buffered), it is not possible to know apriori whether the cell - // (which may be late bound) will converge immediately. Because of this, the choice between using .curr - // (appropriate for buffered outputs) and .next (appropriate for unbuffered outputs) is made at runtime. + // It is not possible to know apriori whether the cell (which may be late bound) will converge immediately. + // Because of this, the choice between using .curr (appropriate for buffered outputs) and .next (appropriate + // for unbuffered outputs) is made at runtime. if (cell_converged && is_cxxrtl_comb_port(cell, conn.first)) f << ".next;\n"; else @@ -1287,19 +1383,34 @@ struct CxxrtlWorker { } } }; - f << indent << "if (" << mangle(cell) << access << "eval()) {\n"; - inc_indent(); - assign_from_outputs(/*cell_converged=*/true); - dec_indent(); - f << indent << "} else {\n"; - inc_indent(); + if (buffered_inputs) { + // If we have any buffered inputs, there's no chance of converging immediately. 
+ f << indent << mangle(cell) << access << "eval();\n"; f << indent << "converged = false;\n"; assign_from_outputs(/*cell_converged=*/false); - dec_indent(); - f << indent << "}\n"; + } else { + f << indent << "if (" << mangle(cell) << access << "eval()) {\n"; + inc_indent(); + assign_from_outputs(/*cell_converged=*/true); + dec_indent(); + f << indent << "} else {\n"; + inc_indent(); + f << indent << "converged = false;\n"; + assign_from_outputs(/*cell_converged=*/false); + dec_indent(); + f << indent << "}\n"; + } } } + void collect_cell_eval(const RTLIL::Cell *cell, bool for_debug, std::vector<const RTLIL::Cell*> &cells) + { + cells.push_back(cell); + for (auto port : cell->connections()) + if (cell->input(port.first)) + collect_sigspec_rhs(port.second, for_debug, cells); + } + void dump_assign(const RTLIL::SigSig &sigsig) { f << indent; @@ -1383,13 +1494,19 @@ struct CxxrtlWorker { f << indent << "}\n"; } - void dump_process(const RTLIL::Process *proc) + void dump_process_case(const RTLIL::Process *proc) { dump_attrs(proc); - f << indent << "// process " << proc->name.str() << "\n"; + f << indent << "// process " << proc->name.str() << " case\n"; // The case attributes (for root case) are always empty. log_assert(proc->root_case.attributes.empty()); dump_case_rule(&proc->root_case); + } + + void dump_process_syncs(const RTLIL::Process *proc) + { + dump_attrs(proc); + f << indent << "// process " << proc->name.str() << " syncs\n"; for (auto sync : proc->syncs) { RTLIL::SigBit sync_bit; if (!sync->signal.empty()) { @@ -1442,78 +1559,89 @@ struct CxxrtlWorker { } } - void dump_wire(const RTLIL::Wire *wire, bool is_local_context) + void dump_wire(const RTLIL::Wire *wire, bool is_local) { - if (elided_wires.count(wire)) + const auto &wire_type = wire_types[wire]; + if (!wire_type.is_named() || wire_type.is_local() != is_local) return; - if (localized_wires[wire] && is_local_context) { - dump_attrs(wire); - f << indent << "value<" << wire->width << "> " << mangle(wire) << ";\n"; + dump_attrs(wire); + f << indent; + if (wire->port_input && wire->port_output) + f << "/*inout*/ "; + else if (wire->port_input) + f << "/*input*/ "; + else if (wire->port_output) + f << "/*output*/ "; + f << (wire_type.is_buffered() ? "wire" : "value"); + if (wire->module->has_attribute(ID(cxxrtl_blackbox)) && wire->has_attribute(ID(cxxrtl_width))) { + f << "<" << wire->get_string_attribute(ID(cxxrtl_width)) << ">"; + } else { + f << "<" << wire->width << ">"; } - if (!localized_wires[wire] && !is_local_context) { - std::string width; - if (wire->module->has_attribute(ID(cxxrtl_blackbox)) && wire->has_attribute(ID(cxxrtl_width))) { - width = wire->get_string_attribute(ID(cxxrtl_width)); - } else { - width = std::to_string(wire->width); - } - - dump_attrs(wire); - f << indent; - if (wire->port_input && wire->port_output) - f << "/*inout*/ "; - else if (wire->port_input) - f << "/*input*/ "; - else if (wire->port_output) - f << "/*output*/ "; - f << (unbuffered_wires[wire] ? 
"value" : "wire") << "<" << width << "> " << mangle(wire); - if (wire->has_attribute(ID::init)) { - f << " "; - dump_const_init(wire->attributes.at(ID::init)); + f << " " << mangle(wire); + if (wire->has_attribute(ID::init)) { + f << " "; + dump_const_init(wire->attributes.at(ID::init)); + } + f << ";\n"; + if (edge_wires[wire]) { + if (!wire_type.is_buffered()) { + f << indent << "value<" << wire->width << "> prev_" << mangle(wire); + if (wire->has_attribute(ID::init)) { + f << " "; + dump_const_init(wire->attributes.at(ID::init)); + } + f << ";\n"; } - f << ";\n"; - if (edge_wires[wire]) { - if (unbuffered_wires[wire]) { - f << indent << "value<" << width << "> prev_" << mangle(wire); - if (wire->has_attribute(ID::init)) { - f << " "; - dump_const_init(wire->attributes.at(ID::init)); + for (auto edge_type : edge_types) { + if (edge_type.first.wire == wire) { + std::string prev, next; + if (!wire_type.is_buffered()) { + prev = "prev_" + mangle(edge_type.first.wire); + next = mangle(edge_type.first.wire); + } else { + prev = mangle(edge_type.first.wire) + ".curr"; + next = mangle(edge_type.first.wire) + ".next"; } - f << ";\n"; - } - for (auto edge_type : edge_types) { - if (edge_type.first.wire == wire) { - std::string prev, next; - if (unbuffered_wires[wire]) { - prev = "prev_" + mangle(edge_type.first.wire); - next = mangle(edge_type.first.wire); - } else { - prev = mangle(edge_type.first.wire) + ".curr"; - next = mangle(edge_type.first.wire) + ".next"; - } - prev += ".slice<" + std::to_string(edge_type.first.offset) + ">().val()"; - next += ".slice<" + std::to_string(edge_type.first.offset) + ">().val()"; - if (edge_type.second != RTLIL::STn) { - f << indent << "bool posedge_" << mangle(edge_type.first) << "() const {\n"; - inc_indent(); - f << indent << "return !" << prev << " && " << next << ";\n"; - dec_indent(); - f << indent << "}\n"; - } - if (edge_type.second != RTLIL::STp) { - f << indent << "bool negedge_" << mangle(edge_type.first) << "() const {\n"; - inc_indent(); - f << indent << "return " << prev << " && !" << next << ";\n"; - dec_indent(); - f << indent << "}\n"; - } + prev += ".slice<" + std::to_string(edge_type.first.offset) + ">().val()"; + next += ".slice<" + std::to_string(edge_type.first.offset) + ">().val()"; + if (edge_type.second != RTLIL::STn) { + f << indent << "bool posedge_" << mangle(edge_type.first) << "() const {\n"; + inc_indent(); + f << indent << "return !" << prev << " && " << next << ";\n"; + dec_indent(); + f << indent << "}\n"; + } + if (edge_type.second != RTLIL::STp) { + f << indent << "bool negedge_" << mangle(edge_type.first) << "() const {\n"; + inc_indent(); + f << indent << "return " << prev << " && !" 
<< next << ";\n"; + dec_indent(); + f << indent << "}\n"; } } } } } + void dump_debug_wire(const RTLIL::Wire *wire, bool is_local) + { + const auto &wire_type = wire_types[wire]; + if (wire_type.is_member()) + return; + + const auto &debug_wire_type = debug_wire_types[wire]; + if (!debug_wire_type.is_named() || debug_wire_type.is_local() != is_local) + return; + + dump_attrs(wire); + f << indent; + if (debug_wire_type.is_outline()) + f << "/*outline*/ "; + f << "value<" << wire->width << "> " << mangle(wire) << ";\n"; + } + void dump_memory(RTLIL::Module *module, const RTLIL::Memory *memory) { vector<const RTLIL::Cell*> init_cells; @@ -1581,7 +1709,7 @@ struct CxxrtlWorker { } } for (auto wire : module->wires()) - dump_wire(wire, /*is_local_context=*/true); + dump_wire(wire, /*is_local=*/true); for (auto node : schedule[module]) { switch (node.type) { case FlowGraph::Node::Type::CONNECT: @@ -1593,8 +1721,11 @@ struct CxxrtlWorker { case FlowGraph::Node::Type::CELL_EVAL: dump_cell_eval(node.cell); break; - case FlowGraph::Node::Type::PROCESS: - dump_process(node.process); + case FlowGraph::Node::Type::PROCESS_SYNC: + dump_process_syncs(node.process); + break; + case FlowGraph::Node::Type::PROCESS_CASE: + dump_process_case(node.process); break; } } @@ -1603,32 +1734,51 @@ struct CxxrtlWorker { dec_indent(); } + void dump_debug_eval_method(RTLIL::Module *module) + { + inc_indent(); + for (auto wire : module->wires()) + dump_debug_wire(wire, /*is_local=*/true); + for (auto node : debug_schedule[module]) { + switch (node.type) { + case FlowGraph::Node::Type::CONNECT: + dump_connect(node.connect, /*for_debug=*/true); + break; + case FlowGraph::Node::Type::CELL_SYNC: + dump_cell_sync(node.cell, /*for_debug=*/true); + break; + case FlowGraph::Node::Type::CELL_EVAL: + dump_cell_eval(node.cell, /*for_debug=*/true); + break; + default: + log_abort(); + } + } + dec_indent(); + } + void dump_commit_method(RTLIL::Module *module) { inc_indent(); f << indent << "bool changed = false;\n"; for (auto wire : module->wires()) { - if (elided_wires.count(wire)) - continue; - if (unbuffered_wires[wire]) { - if (edge_wires[wire]) - f << indent << "prev_" << mangle(wire) << " = " << mangle(wire) << ";\n"; - continue; - } - if (!module->get_bool_attribute(ID(cxxrtl_blackbox)) || wire->port_id != 0) - f << indent << "changed |= " << mangle(wire) << ".commit();\n"; + const auto &wire_type = wire_types[wire]; + if (wire_type.type == WireType::MEMBER && edge_wires[wire]) + f << indent << "prev_" << mangle(wire) << " = " << mangle(wire) << ";\n"; + if (wire_type.is_buffered()) + f << indent << "if (" << mangle(wire) << ".commit()) changed = true;\n"; } if (!module->get_bool_attribute(ID(cxxrtl_blackbox))) { for (auto memory : module->memories) { if (!writable_memories[memory.second]) continue; - f << indent << "changed |= " << mangle(memory.second) << ".commit();\n"; + f << indent << "if (" << mangle(memory.second) << ".commit()) changed = true;\n"; } for (auto cell : module->cells()) { if (is_internal_cell(cell->type)) continue; const char *access = is_cxxrtl_blackbox_cell(cell) ? 
"->" : "."; - f << indent << "changed |= " << mangle(cell) << access << "commit();\n"; + f << indent << "if (" << mangle(cell) << access << "commit()) changed = true;\n"; } } f << indent << "return changed;\n"; @@ -1638,96 +1788,134 @@ struct CxxrtlWorker { void dump_debug_info_method(RTLIL::Module *module) { size_t count_public_wires = 0; - size_t count_const_wires = 0; - size_t count_alias_wires = 0; size_t count_member_wires = 0; - size_t count_skipped_wires = 0; + size_t count_undriven = 0; size_t count_driven_sync = 0; size_t count_driven_comb = 0; - size_t count_undriven = 0; size_t count_mixed_driver = 0; + size_t count_alias_wires = 0; + size_t count_const_wires = 0; + size_t count_inline_wires = 0; + size_t count_skipped_wires = 0; inc_indent(); f << indent << "assert(path.empty() || path[path.size() - 1] == ' ');\n"; for (auto wire : module->wires()) { - if (wire->name[0] != '\\') - continue; - if (module->get_bool_attribute(ID(cxxrtl_blackbox)) && (wire->port_id == 0)) + const auto &debug_wire_type = debug_wire_types[wire]; + if (!wire->name.isPublic()) continue; count_public_wires++; - if (debug_const_wires.count(wire)) { - // Wire tied to a constant - f << indent << "static const value<" << wire->width << "> const_" << mangle(wire) << " = "; - dump_const(debug_const_wires[wire]); - f << ";\n"; - f << indent << "items.add(path + " << escape_cxx_string(get_hdl_name(wire)); - f << ", debug_item(const_" << mangle(wire) << ", "; - f << wire->start_offset << "));\n"; - count_const_wires++; - } else if (debug_alias_wires.count(wire)) { - // Alias of a member wire - f << indent << "items.add(path + " << escape_cxx_string(get_hdl_name(wire)); - f << ", debug_item(debug_alias(), " << mangle(debug_alias_wires[wire]) << ", "; - f << wire->start_offset << "));\n"; - count_alias_wires++; - } else if (!localized_wires.count(wire)) { - // Member wire - std::vector<std::string> flags; - - if (wire->port_input && wire->port_output) - flags.push_back("INOUT"); - else if (wire->port_input) - flags.push_back("INPUT"); - else if (wire->port_output) - flags.push_back("OUTPUT"); - - bool has_driven_sync = false; - bool has_driven_comb = false; - bool has_undriven = false; - SigSpec sig(wire); - for (auto bit : sig.bits()) - if (!bit_has_state.count(bit)) - has_undriven = true; - else if (bit_has_state[bit]) - has_driven_sync = true; - else - has_driven_comb = true; - if (has_driven_sync) - flags.push_back("DRIVEN_SYNC"); - if (has_driven_sync && !has_driven_comb && !has_undriven) - count_driven_sync++; - if (has_driven_comb) - flags.push_back("DRIVEN_COMB"); - if (!has_driven_sync && has_driven_comb && !has_undriven) - count_driven_comb++; - if (has_undriven) - flags.push_back("UNDRIVEN"); - if (!has_driven_sync && !has_driven_comb && has_undriven) - count_undriven++; - if (has_driven_sync + has_driven_comb + has_undriven > 1) - count_mixed_driver++; - - f << indent << "items.add(path + " << escape_cxx_string(get_hdl_name(wire)); - f << ", debug_item(" << mangle(wire) << ", "; - f << wire->start_offset; - bool first = true; - for (auto flag : flags) { - if (first) { - first = false; - f << ", "; + switch (debug_wire_type.type) { + case WireType::BUFFERED: + case WireType::MEMBER: { + // Member wire + std::vector<std::string> flags; + + if (wire->port_input && wire->port_output) + flags.push_back("INOUT"); + else if (wire->port_output) + flags.push_back("OUTPUT"); + else if (wire->port_input) + flags.push_back("INPUT"); + + bool has_driven_sync = false; + bool has_driven_comb = false; + bool 
has_undriven = false; + if (!module->get_bool_attribute(ID(cxxrtl_blackbox))) { + for (auto bit : SigSpec(wire)) + if (!bit_has_state.count(bit)) + has_undriven = true; + else if (bit_has_state[bit]) + has_driven_sync = true; + else + has_driven_comb = true; + } else if (wire->port_output) { + switch (cxxrtl_port_type(module, wire->name)) { + case CxxrtlPortType::SYNC: + has_driven_sync = true; + break; + case CxxrtlPortType::COMB: + has_driven_comb = true; + break; + case CxxrtlPortType::UNKNOWN: + has_driven_sync = has_driven_comb = true; + break; + } } else { - f << "|"; + has_undriven = true; } - f << "debug_item::" << flag; + if (has_undriven) + flags.push_back("UNDRIVEN"); + if (!has_driven_sync && !has_driven_comb && has_undriven) + count_undriven++; + if (has_driven_sync) + flags.push_back("DRIVEN_SYNC"); + if (has_driven_sync && !has_driven_comb && !has_undriven) + count_driven_sync++; + if (has_driven_comb) + flags.push_back("DRIVEN_COMB"); + if (!has_driven_sync && has_driven_comb && !has_undriven) + count_driven_comb++; + if (has_driven_sync + has_driven_comb + has_undriven > 1) + count_mixed_driver++; + + f << indent << "items.add(path + " << escape_cxx_string(get_hdl_name(wire)); + f << ", debug_item(" << mangle(wire) << ", " << wire->start_offset; + bool first = true; + for (auto flag : flags) { + if (first) { + first = false; + f << ", "; + } else { + f << "|"; + } + f << "debug_item::" << flag; + } + f << "));\n"; + count_member_wires++; + break; + } + case WireType::ALIAS: { + // Alias of a member wire + const RTLIL::Wire *aliasee = debug_wire_type.sig_subst.as_wire(); + f << indent << "items.add(path + " << escape_cxx_string(get_hdl_name(wire)); + f << ", debug_item("; + // If the aliasee is an outline, then the alias must be an outline, too; otherwise downstream + // tooling has no way to find out about the outline. 
+ if (debug_wire_types[aliasee].is_outline()) + f << "debug_eval_outline"; + else + f << "debug_alias()"; + f << ", " << mangle(aliasee) << ", " << wire->start_offset << "));\n"; + count_alias_wires++; + break; + } + case WireType::CONST: { + // Wire tied to a constant + f << indent << "static const value<" << wire->width << "> const_" << mangle(wire) << " = "; + dump_const(debug_wire_type.sig_subst.as_const()); + f << ";\n"; + f << indent << "items.add(path + " << escape_cxx_string(get_hdl_name(wire)); + f << ", debug_item(const_" << mangle(wire) << ", " << wire->start_offset << "));\n"; + count_const_wires++; + break; + } + case WireType::OUTLINE: { + // Localized or inlined, but rematerializable wire + f << indent << "items.add(path + " << escape_cxx_string(get_hdl_name(wire)); + f << ", debug_item(debug_eval_outline, " << mangle(wire) << ", " << wire->start_offset << "));\n"; + count_inline_wires++; + break; + } + default: { + // Localized or inlined wire with no debug information + count_skipped_wires++; + break; } - f << "));\n"; - count_member_wires++; - } else { - count_skipped_wires++; } } if (!module->get_bool_attribute(ID(cxxrtl_blackbox))) { for (auto &memory_it : module->memories) { - if (memory_it.first[0] != '\\') + if (!memory_it.first.isPublic()) continue; f << indent << "items.add(path + " << escape_cxx_string(get_hdl_name(memory_it.second)); f << ", debug_item(" << mangle(memory_it.second) << ", "; @@ -1745,14 +1933,18 @@ struct CxxrtlWorker { log_debug("Debug information statistics for module `%s':\n", log_id(module)); log_debug(" Public wires: %zu, of which:\n", count_public_wires); - log_debug(" Const wires: %zu\n", count_const_wires); - log_debug(" Alias wires: %zu\n", count_alias_wires); log_debug(" Member wires: %zu, of which:\n", count_member_wires); + log_debug(" Undriven: %zu (incl. inputs)\n", count_undriven); log_debug(" Driven sync: %zu\n", count_driven_sync); log_debug(" Driven comb: %zu\n", count_driven_comb); - log_debug(" Undriven: %zu\n", count_undriven); log_debug(" Mixed driver: %zu\n", count_mixed_driver); - log_debug(" Other wires: %zu (no debug information)\n", count_skipped_wires); + if (!module->get_bool_attribute(ID(cxxrtl_blackbox))) { + log_debug(" Inline wires: %zu\n", count_inline_wires); + log_debug(" Alias wires: %zu\n", count_alias_wires); + log_debug(" Const wires: %zu\n", count_const_wires); + log_debug(" Other wires: %zu%s\n", count_skipped_wires, + count_skipped_wires > 0 ? 
" (debug unavailable)" : ""); + } } void dump_metadata_map(const dict<RTLIL::IdString, RTLIL::Const> &metadata_map) @@ -1792,7 +1984,7 @@ struct CxxrtlWorker { inc_indent(); for (auto wire : module->wires()) { if (wire->port_id != 0) - dump_wire(wire, /*is_local_context=*/false); + dump_wire(wire, /*is_local=*/false); } f << "\n"; f << indent << "bool eval() override {\n"; @@ -1838,8 +2030,9 @@ struct CxxrtlWorker { f << indent << "struct " << mangle(module) << " : public module {\n"; inc_indent(); for (auto wire : module->wires()) - dump_wire(wire, /*is_local_context=*/false); - f << "\n"; + dump_wire(wire, /*is_local=*/false); + for (auto wire : module->wires()) + dump_debug_wire(wire, /*is_local=*/false); bool has_memories = false; for (auto memory : module->memories) { dump_memory(module, memory.second); @@ -1869,10 +2062,62 @@ struct CxxrtlWorker { } if (has_cells) f << "\n"; + f << indent << mangle(module) << "() {}\n"; + if (has_cells) { + f << indent << mangle(module) << "(adopt, " << mangle(module) << " other) :\n"; + bool first = true; + for (auto cell : module->cells()) { + if (is_internal_cell(cell->type)) + continue; + if (first) { + first = false; + } else { + f << ",\n"; + } + RTLIL::Module *cell_module = module->design->module(cell->type); + if (cell_module->get_bool_attribute(ID(cxxrtl_blackbox))) { + f << indent << " " << mangle(cell) << "(std::move(other." << mangle(cell) << "))"; + } else { + f << indent << " " << mangle(cell) << "(adopt {}, std::move(other." << mangle(cell) << "))"; + } + } + f << " {\n"; + inc_indent(); + for (auto cell : module->cells()) { + if (is_internal_cell(cell->type)) + continue; + RTLIL::Module *cell_module = module->design->module(cell->type); + if (cell_module->get_bool_attribute(ID(cxxrtl_blackbox))) + f << indent << mangle(cell) << "->reset();\n"; + } + dec_indent(); + f << indent << "}\n"; + } else { + f << indent << mangle(module) << "(adopt, " << mangle(module) << " other) {}\n"; + } + f << "\n"; + f << indent << "void reset() override {\n"; + inc_indent(); + f << indent << "*this = " << mangle(module) << "(adopt {}, std::move(*this));\n"; + dec_indent(); + f << indent << "}\n"; + f << "\n"; f << indent << "bool eval() override;\n"; f << indent << "bool commit() override;\n"; - if (debug_info) + if (debug_info) { + if (debug_eval) { + f << "\n"; + f << indent << "void debug_eval();\n"; + for (auto wire : module->wires()) + if (debug_wire_types[wire].is_outline()) { + f << indent << "debug_outline debug_eval_outline { std::bind(&" + << mangle(module) << "::debug_eval, this) };\n"; + break; + } + } + f << "\n"; f << indent << "void debug_info(debug_items &items, std::string path = \"\") override;\n"; + } dec_indent(); f << indent << "}; // struct " << mangle(module) << "\n"; f << "\n"; @@ -1892,6 +2137,13 @@ struct CxxrtlWorker { f << indent << "}\n"; f << "\n"; if (debug_info) { + if (debug_eval) { + f << indent << "void " << mangle(module) << "::debug_eval() {\n"; + dump_debug_eval_method(module); + f << indent << "}\n"; + f << "\n"; + } + f << indent << "CXXRTL_EXTREMELY_COLD\n"; f << indent << "void " << mangle(module) << "::debug_info(debug_items &items, std::string path) {\n"; dump_debug_info_method(module); f << indent << "}\n"; @@ -2021,7 +2273,7 @@ struct CxxrtlWorker { void register_edge_signal(SigMap &sigmap, RTLIL::SigSpec signal, RTLIL::SyncType type) { signal = sigmap(signal); - log_assert(signal.is_wire() && signal.is_bit()); + log_assert(is_valid_clock(signal)); log_assert(type == RTLIL::STp || type == RTLIL::STn || type 
== RTLIL::STe); RTLIL::SigBit sigbit = signal[0]; @@ -2029,7 +2281,8 @@ struct CxxrtlWorker { edge_types[sigbit] = type; else if (edge_types[sigbit] != type) edge_types[sigbit] = RTLIL::STe; - edge_wires.insert(signal.as_wire()); + // Cannot use as_wire because signal might not be a full wire, instead extract the wire from the sigbit + edge_wires.insert(sigbit.wire); } void analyze_design(RTLIL::Design *design) @@ -2047,8 +2300,11 @@ struct CxxrtlWorker { if (module->get_bool_attribute(ID(cxxrtl_blackbox))) { for (auto port : module->ports) { RTLIL::Wire *wire = module->wire(port); - if (wire->port_input && !wire->port_output) - unbuffered_wires.insert(wire); + if (wire->port_input && !wire->port_output) { + wire_types[wire] = debug_wire_types[wire] = {WireType::MEMBER}; + } else if (wire->port_input || wire->port_output) { + wire_types[wire] = debug_wire_types[wire] = {WireType::BUFFERED}; + } if (wire->has_attribute(ID(cxxrtl_edge))) { RTLIL::Const edge_attr = wire->attributes[ID(cxxrtl_edge)]; if (!(edge_attr.flags & RTLIL::CONST_FLAG_STRING) || (int)edge_attr.decode_string().size() != GetSize(wire)) @@ -2078,6 +2334,8 @@ struct CxxrtlWorker { continue; } + // Construct a flow graph where each node is a basic computational operation generally corresponding + // to a fragment of the RTLIL netlist. FlowGraph flow; for (auto conn : module->connections()) @@ -2105,14 +2363,14 @@ struct CxxrtlWorker { // Various DFF cells are treated like posedge/negedge processes, see above for details. if (cell->type.in(ID($dff), ID($dffe), ID($adff), ID($adffe), ID($dffsr), ID($dffsre), ID($sdff), ID($sdffe), ID($sdffce))) { - if (cell->getPort(ID::CLK).is_wire()) + if (is_valid_clock(cell->getPort(ID::CLK))) register_edge_signal(sigmap, cell->getPort(ID::CLK), cell->parameters[ID::CLK_POLARITY].as_bool() ? RTLIL::STp : RTLIL::STn); } // Similar for memory port cells. if (cell->type.in(ID($memrd), ID($memwr))) { if (cell->getParam(ID::CLK_ENABLE).as_bool()) { - if (cell->getPort(ID::CLK).is_wire()) + if (is_valid_clock(cell->getPort(ID::CLK))) register_edge_signal(sigmap, cell->getPort(ID::CLK), cell->parameters[ID::CLK_POLARITY].as_bool() ? RTLIL::STp : RTLIL::STn); } @@ -2122,7 +2380,7 @@ struct CxxrtlWorker { if (cell->type == ID($memwr)) writable_memories.insert(module->memories[cell->getParam(ID::MEMID).decode_string()]); // Collect groups of memory write ports in the same domain. - if (cell->type == ID($memwr) && cell->getParam(ID::CLK_ENABLE).as_bool() && cell->getPort(ID::CLK).is_wire()) { + if (cell->type == ID($memwr) && cell->getParam(ID::CLK_ENABLE).as_bool() && is_valid_clock(cell->getPort(ID::CLK))) { RTLIL::SigBit clk_bit = sigmap(cell->getPort(ID::CLK))[0]; const RTLIL::Memory *memory = module->memories[cell->getParam(ID::MEMID).decode_string()]; memwr_per_domain[{clk_bit, memory}].insert(cell); @@ -2134,7 +2392,7 @@ struct CxxrtlWorker { } for (auto cell : module->cells()) { // Collect groups of memory write ports read by every transparent read port. 
- if (cell->type == ID($memrd) && cell->getParam(ID::CLK_ENABLE).as_bool() && cell->getPort(ID::CLK).is_wire() && + if (cell->type == ID($memrd) && cell->getParam(ID::CLK_ENABLE).as_bool() && is_valid_clock(cell->getPort(ID::CLK)) && cell->getParam(ID::TRANSPARENT).as_bool()) { RTLIL::SigBit clk_bit = sigmap(cell->getPort(ID::CLK))[0]; const RTLIL::Memory *memory = module->memories[cell->getParam(ID::MEMID).decode_string()]; @@ -2177,59 +2435,40 @@ struct CxxrtlWorker { } } - for (auto wire : module->wires()) { - if (!flow.is_elidable(wire)) continue; - if (wire->port_id != 0) continue; - if (wire->get_bool_attribute(ID::keep)) continue; - if (wire->name.begins_with("$") && !elide_internal) continue; - if (wire->name.begins_with("\\") && !elide_public) continue; - if (edge_wires[wire]) continue; - if (flow.wire_comb_defs[wire].size() > 1) - log_cmd_error("Wire %s.%s has multiple drivers.\n", log_id(module), log_id(wire)); - log_assert(flow.wire_comb_defs[wire].size() == 1); - elided_wires[wire] = **flow.wire_comb_defs[wire].begin(); - } - - dict<FlowGraph::Node*, pool<const RTLIL::Wire*>, hash_ptr_ops> node_defs; - for (auto wire_comb_def : flow.wire_comb_defs) - for (auto node : wire_comb_def.second) - node_defs[node].insert(wire_comb_def.first); - + // Construct a linear order of the flow graph that minimizes the amount of feedback arcs. A flow graph + // without feedback arcs can generally be evaluated in a single pass, i.e. it always requires only + // a single delta cycle. Scheduler<FlowGraph::Node> scheduler; - dict<FlowGraph::Node*, Scheduler<FlowGraph::Node>::Vertex*, hash_ptr_ops> node_map; + dict<FlowGraph::Node*, Scheduler<FlowGraph::Node>::Vertex*, hash_ptr_ops> node_vertex_map; for (auto node : flow.nodes) - node_map[node] = scheduler.add(node); - for (auto node_def : node_defs) { - auto vertex = node_map[node_def.first]; - for (auto wire : node_def.second) + node_vertex_map[node] = scheduler.add(node); + for (auto node_comb_def : flow.node_comb_defs) { + auto vertex = node_vertex_map[node_comb_def.first]; + for (auto wire : node_comb_def.second) for (auto succ_node : flow.wire_uses[wire]) { - auto succ_vertex = node_map[succ_node]; + auto succ_vertex = node_vertex_map[succ_node]; vertex->succs.insert(succ_vertex); succ_vertex->preds.insert(vertex); } } - auto eval_order = scheduler.schedule(); - pool<FlowGraph::Node*, hash_ptr_ops> evaluated; + // Find out whether the order includes any feedback arcs. + std::vector<FlowGraph::Node*> node_order; + pool<FlowGraph::Node*, hash_ptr_ops> evaluated_nodes; pool<const RTLIL::Wire*> feedback_wires; - for (auto vertex : eval_order) { + for (auto vertex : scheduler.schedule()) { auto node = vertex->data; - schedule[module].push_back(*node); + node_order.push_back(node); // Any wire that is an output of node vo and input of node vi where vo is scheduled later than vi // is a feedback wire. Feedback wires indicate apparent logic loops in the design, which may be // caused by a true logic loop, but usually are a benign result of dependency tracking that works - // on wire, not bit, level. Nevertheless, feedback wires cannot be localized. - evaluated.insert(node); - for (auto wire : node_defs[node]) + // on wire, not bit, level. Nevertheless, feedback wires cannot be unbuffered. 
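A condensed restatement of the feedback rule described in the comment above, using assumed stand-in types rather than the backend's FlowGraph and Scheduler classes: once the nodes are laid out in scheduled order, any combinatorially defined wire with a user that has already been evaluated when its driver runs is a feedback wire and must stay buffered.

#include <map>
#include <set>
#include <string>
#include <vector>

struct Node {
	std::set<std::string> comb_defs; // wires combinatorially driven by this node
};

// `order' is the schedule; `wire_uses' maps a wire to the nodes reading it.
std::set<std::string> find_feedback_wires(
		const std::vector<Node*> &order,
		const std::map<std::string, std::set<Node*>> &wire_uses)
{
	std::set<Node*> evaluated;
	std::set<std::string> feedback;
	for (Node *node : order) {
		evaluated.insert(node);
		for (const std::string &wire : node->comb_defs) {
			auto uses = wire_uses.find(wire);
			if (uses == wire_uses.end())
				continue;
			for (Node *user : uses->second)
				if (evaluated.count(user))
					feedback.insert(wire); // user ran before its driver: needs a buffer
		}
	}
	return feedback;
}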
+ evaluated_nodes.insert(node); + for (auto wire : flow.node_comb_defs[node]) for (auto succ_node : flow.wire_uses[wire]) - if (evaluated[succ_node]) { + if (evaluated_nodes[succ_node]) feedback_wires.insert(wire); - // Feedback wires may never be elided because feedback requires state, but the point of elision - // (and localization) is to eliminate state. - elided_wires.erase(wire); - } } - if (!feedback_wires.empty()) { has_feedback_arcs = true; log("Module `%s' contains feedback arcs through wires:\n", log_id(module)); @@ -2237,21 +2476,86 @@ struct CxxrtlWorker { log(" %s\n", log_id(wire)); } + // Conservatively assign wire types. Assignment of types BUFFERED and MEMBER is final, but assignment + // of type LOCAL may be further refined to UNUSED or INLINE. for (auto wire : module->wires()) { + auto &wire_type = wire_types[wire]; + wire_type = {WireType::BUFFERED}; + if (feedback_wires[wire]) continue; if (wire->port_output && !module->get_bool_attribute(ID::top)) continue; - if (wire->name.begins_with("$") && !unbuffer_internal) continue; - if (wire->name.begins_with("\\") && !unbuffer_public) continue; + if (!wire->name.isPublic() && !unbuffer_internal) continue; + if (wire->name.isPublic() && !unbuffer_public) continue; if (flow.wire_sync_defs.count(wire) > 0) continue; - unbuffered_wires.insert(wire); + wire_type = {WireType::MEMBER}; + if (edge_wires[wire]) continue; if (wire->get_bool_attribute(ID::keep)) continue; if (wire->port_input || wire->port_output) continue; - if (wire->name.begins_with("$") && !localize_internal) continue; - if (wire->name.begins_with("\\") && !localize_public) continue; - localized_wires.insert(wire); + if (!wire->name.isPublic() && !localize_internal) continue; + if (wire->name.isPublic() && !localize_public) continue; + wire_type = {WireType::LOCAL}; + } + + // Discover nodes reachable from primary outputs (i.e. members) and collect reachable wire users. + pool<FlowGraph::Node*, hash_ptr_ops> worklist; + for (auto node : flow.nodes) { + if (node->type == FlowGraph::Node::Type::CELL_EVAL && is_effectful_cell(node->cell->type)) + worklist.insert(node); // node has effects + else if (flow.node_sync_defs.count(node)) + worklist.insert(node); // node is a flip-flop + else if (flow.node_comb_defs.count(node)) { + for (auto wire : flow.node_comb_defs[node]) + if (wire_types[wire].is_member()) + worklist.insert(node); // node drives public wires + } + } + dict<const RTLIL::Wire*, pool<FlowGraph::Node*, hash_ptr_ops>> live_wires; + pool<FlowGraph::Node*, hash_ptr_ops> live_nodes; + while (!worklist.empty()) { + auto node = worklist.pop(); + live_nodes.insert(node); + for (auto wire : flow.node_uses[node]) { + live_wires[wire].insert(node); + for (auto pred_node : flow.wire_comb_defs[wire]) + if (!live_nodes[pred_node]) + worklist.insert(pred_node); + } + } + + // Refine wire types taking into account the amount of uses from reachable nodes only. 
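The reachability pass above is a conventional backward worklist; a sketch with assumed minimal types standing in for the flow graph nodes. Roots are nodes with side effects, flip-flops, and drivers of member wires; everything they transitively read through combinatorial definitions stays live, and the rest never makes it into eval().

#include <deque>
#include <set>
#include <vector>

struct GNode {
	bool is_root = false;          // effectful cell, flip-flop, or drives a member wire
	std::vector<GNode*> preds;     // nodes computing the wires this node reads
};

std::set<GNode*> live_nodes(const std::vector<GNode*> &all)
{
	std::set<GNode*> live;
	std::deque<GNode*> worklist;
	for (GNode *node : all)
		if (node->is_root && live.insert(node).second)
			worklist.push_back(node);
	while (!worklist.empty()) {
		GNode *node = worklist.front();
		worklist.pop_front();
		for (GNode *pred : node->preds)
			if (live.insert(pred).second)
				worklist.push_back(pred);
	}
	return live;
}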
+ for (auto wire : module->wires()) { + auto &wire_type = wire_types[wire]; + if (!wire_type.is_local()) continue; + if (!wire->name.isPublic() && !inline_internal) continue; + if (wire->name.isPublic() && !inline_public) continue; + + if (live_wires[wire].empty()) { + wire_type = {WireType::UNUSED}; // wire never used + } else if (flow.is_inlinable(wire, live_wires[wire])) { + if (flow.wire_comb_defs[wire].size() > 1) + log_cmd_error("Wire %s.%s has multiple drivers!\n", log_id(module), log_id(wire)); + log_assert(flow.wire_comb_defs[wire].size() == 1); + FlowGraph::Node *node = *flow.wire_comb_defs[wire].begin(); + switch (node->type) { + case FlowGraph::Node::Type::CELL_EVAL: + if (!is_inlinable_cell(node->cell->type)) continue; + wire_type = {WireType::INLINE, node->cell}; // wire replaced with cell + break; + case FlowGraph::Node::Type::CONNECT: + wire_type = {WireType::INLINE, node->connect.second}; // wire replaced with sig + break; + default: continue; + } + live_nodes.erase(node); + } } + // Emit reachable nodes in eval(). + for (auto node : node_order) + if (live_nodes[node]) + schedule[module].push_back(*node); + // For maximum performance, the state of the simulation (which is the same as the set of its double buffered // wires, since using a singly buffered wire for any kind of state introduces a race condition) should contain // no wires attached to combinatorial outputs. Feedback wires, by definition, make that impossible. However, @@ -2259,10 +2563,9 @@ struct CxxrtlWorker { // as a wire with multiple drivers where one of them is combinatorial and the other is synchronous. Such designs // also require more than one delta cycle to converge. pool<const RTLIL::Wire*> buffered_comb_wires; - for (auto wire : module->wires()) { - if (flow.wire_comb_defs[wire].size() > 0 && !unbuffered_wires[wire] && !feedback_wires[wire]) + for (auto wire : module->wires()) + if (wire_types[wire].is_buffered() && !feedback_wires[wire] && flow.wire_comb_defs[wire].size() > 0) buffered_comb_wires.insert(wire); - } if (!buffered_comb_wires.empty()) { has_buffered_comb_wires = true; log("Module `%s' contains buffered combinatorial wires:\n", log_id(module)); @@ -2270,47 +2573,116 @@ struct CxxrtlWorker { log(" %s\n", log_id(wire)); } + // Record whether eval() requires only one delta cycle in this module. eval_converges[module] = feedback_wires.empty() && buffered_comb_wires.empty(); - for (auto item : flow.bit_has_state) - bit_has_state.insert(item); - if (debug_info) { - // Find wires that alias other wires or are tied to a constant; debug information can be enriched with these - // at essentially zero additional cost. - // - // Note that the information collected here can't be used for optimizing the netlist: debug information queries - // are pure and run on a design in a stable state, which allows assumptions that do not otherwise hold. + // Annotate wire bits with the type of their driver; this is exposed in the debug metadata. + for (auto item : flow.bit_has_state) + bit_has_state.insert(item); + + // Assign debug information wire types to public wires according to the chosen debug level. + // Unlike with optimized wire types, all assignments here are final. 
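The eval_converges flag recorded above corresponds to how many delta cycles a driver has to run per step. A sketch of such a loop on the simulation side, assuming ModuleT is any class generated by this backend; eval() returns whether the pass converged, commit() returns whether any state changed, and both are part of the generated interface rather than introduced here.

#include <cstddef>

template<typename ModuleT>
size_t settle(ModuleT &top)
{
	size_t deltas = 0;
	bool converged = false;
	do {
		converged = top.eval();           // compute next values from current ones
		deltas++;
	} while (top.commit() && !converged); // publish next values; repeat if they changed
	return deltas;                        // 1 for modules where eval_converges holds
}

For modules reported above as containing feedback arcs or buffered combinatorial wires, this loop may legitimately run more than once per step.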
for (auto wire : module->wires()) { - if (wire->name[0] != '\\') - continue; - if (!unbuffered_wires[wire]) - continue; - const RTLIL::Wire *wire_it = wire; - while (1) { - if (!(flow.wire_def_elidable.count(wire_it) && flow.wire_def_elidable[wire_it])) - break; // not an alias: complex def - log_assert(flow.wire_comb_defs[wire_it].size() == 1); - FlowGraph::Node *node = *flow.wire_comb_defs[wire_it].begin(); - if (node->type != FlowGraph::Node::Type::CONNECT) - break; // not an alias: def by cell - RTLIL::SigSpec rhs_sig = node->connect.second; - if (rhs_sig.is_wire()) { - RTLIL::Wire *rhs_wire = rhs_sig.as_wire(); - if (unbuffered_wires[rhs_wire]) { - wire_it = rhs_wire; // maybe an alias - } else { - debug_alias_wires[wire] = rhs_wire; // is an alias + const auto &wire_type = wire_types[wire]; + auto &debug_wire_type = debug_wire_types[wire]; + if (wire_type.type == WireType::UNUSED) continue; + if (!wire->name.isPublic()) continue; + + if (!debug_info) continue; + if (wire->port_input || wire_type.is_buffered()) + debug_wire_type = wire_type; // wire contains state + + if (!debug_member) continue; + if (wire_type.is_member()) + debug_wire_type = wire_type; // wire is a member + + if (!debug_alias) continue; + const RTLIL::Wire *it = wire; + while (flow.is_inlinable(it)) { + log_assert(flow.wire_comb_defs[it].size() == 1); + FlowGraph::Node *node = *flow.wire_comb_defs[it].begin(); + if (node->type != FlowGraph::Node::Type::CONNECT) break; // not an alias + RTLIL::SigSpec rhs = node->connect.second; + if (rhs.is_fully_const()) { + debug_wire_type = {WireType::CONST, rhs}; // wire replaced with const + } else if (rhs.is_wire()) { + if (wire_types[rhs.as_wire()].is_member()) + debug_wire_type = {WireType::ALIAS, rhs}; // wire replaced with wire + else if (debug_eval && rhs.as_wire()->name.isPublic()) + debug_wire_type = {WireType::ALIAS, rhs}; // wire replaced with outline + it = rhs.as_wire(); // and keep looking + continue; + } + break; + } + + if (!debug_eval) continue; + if (!debug_wire_type.is_exact() && !wire_type.is_member()) + debug_wire_type = {WireType::OUTLINE}; // wire is local or inlined + } + + // Discover nodes reachable from primary outputs (i.e. outlines) up until primary inputs (i.e. members) + // and collect reachable wire users. + pool<FlowGraph::Node*, hash_ptr_ops> worklist; + for (auto node : flow.nodes) { + if (flow.node_comb_defs.count(node)) + for (auto wire : flow.node_comb_defs[node]) + if (debug_wire_types[wire].is_outline()) + worklist.insert(node); // node drives outline + } + dict<const RTLIL::Wire*, pool<FlowGraph::Node*, hash_ptr_ops>> debug_live_wires; + pool<FlowGraph::Node*, hash_ptr_ops> debug_live_nodes; + while (!worklist.empty()) { + auto node = worklist.pop(); + debug_live_nodes.insert(node); + for (auto wire : flow.node_uses[node]) { + if (debug_wire_types[wire].is_member()) + continue; // node uses member + if (debug_wire_types[wire].is_exact()) + continue; // node uses alias or const + debug_live_wires[wire].insert(node); + for (auto pred_node : flow.wire_comb_defs[wire]) + if (!debug_live_nodes[pred_node]) + worklist.insert(pred_node); + } + } + + // Assign debug information wire types to internal wires used by reachable nodes. This is similar + // to refining optimized wire types with the exception that the assignments here are first and final. 
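The alias-chasing loop above can be viewed in isolation. A sketch with an assumed, simplified netlist representation (each wire has at most one plain-connection driver), showing what ends up as a CONST or ALIAS debug item at -g3 and higher; the types here are illustrative, not the backend's own.

#include <map>
#include <set>
#include <string>

struct Rhs {
	bool is_const;       // true if the driver is a constant
	std::string value;   // constant text if is_const, otherwise a wire name
};

// `connects' holds wires whose only driver is a plain connection; `members' holds
// wires materialized as members of the generated class. Returns the wire itself
// when no alias or constant is found.
Rhs debug_alias_of(const std::map<std::string, Rhs> &connects,
                   const std::set<std::string> &members,
                   std::string wire)
{
	while (connects.count(wire)) {
		Rhs rhs = connects.at(wire);
		if (rhs.is_const)
			return rhs;                     // wire replaced with const
		if (members.count(rhs.value))
			return rhs;                     // wire replaced with member wire
		wire = rhs.value;                   // maybe an alias; keep looking
	}
	return Rhs{false, wire};
}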
+ for (auto wire : module->wires()) { + const auto &wire_type = wire_types[wire]; + auto &debug_wire_type = debug_wire_types[wire]; + if (wire->name.isPublic()) continue; + + if (live_wires[wire].empty() || debug_live_wires[wire].empty()) { + continue; // wire never used + } else if (flow.is_inlinable(wire, debug_live_wires[wire])) { + log_assert(flow.wire_comb_defs[wire].size() == 1); + FlowGraph::Node *node = *flow.wire_comb_defs[wire].begin(); + switch (node->type) { + case FlowGraph::Node::Type::CELL_EVAL: + if (!is_inlinable_cell(node->cell->type)) continue; + debug_wire_type = {WireType::INLINE, node->cell}; // wire replaced with cell break; - } - } else if (rhs_sig.is_fully_const()) { - debug_const_wires[wire] = rhs_sig.as_const(); // is a const - break; - } else { - break; // not an alias: complex rhs + case FlowGraph::Node::Type::CONNECT: + debug_wire_type = {WireType::INLINE, node->connect.second}; // wire replaced with sig + break; + default: continue; } + debug_live_nodes.erase(node); + } else if (wire_type.is_local()) { + debug_wire_type = {WireType::LOCAL}; // wire not inlinable + } else { + log_assert(wire_type.is_member()); + debug_wire_type = wire_type; // wire is a member } } + + // Emit reachable nodes in debug_eval(). + for (auto node : node_order) + if (debug_live_nodes[node]) + debug_schedule[module].push_back(*node); } } if (has_feedback_arcs || has_buffered_comb_wires) { @@ -2401,8 +2773,7 @@ struct CxxrtlWorker { struct CxxrtlBackend : public Backend { static const int DEFAULT_OPT_LEVEL = 6; - static const int OPT_LEVEL_DEBUG = 4; - static const int DEFAULT_DEBUG_LEVEL = 1; + static const int DEFAULT_DEBUG_LEVEL = 4; CxxrtlBackend() : Backend("cxxrtl", "convert design to C++ RTL simulation") { } void help() override @@ -2598,13 +2969,13 @@ struct CxxrtlBackend : public Backend { log(" no optimization.\n"); log("\n"); log(" -O1\n"); - log(" localize internal wires if possible.\n"); + log(" unbuffer internal wires if possible.\n"); log("\n"); log(" -O2\n"); - log(" like -O1, and unbuffer internal wires if possible.\n"); + log(" like -O1, and localize internal wires if possible.\n"); log("\n"); log(" -O3\n"); - log(" like -O2, and elide internal wires if possible.\n"); + log(" like -O2, and inline internal wires if possible.\n"); log("\n"); log(" -O4\n"); log(" like -O3, and unbuffer public wires not marked (*keep*) if possible.\n"); @@ -2613,22 +2984,30 @@ struct CxxrtlBackend : public Backend { log(" like -O4, and localize public wires not marked (*keep*) if possible.\n"); log("\n"); log(" -O6\n"); - log(" like -O5, and elide public wires not marked (*keep*) if possible.\n"); - log("\n"); - log(" -Og\n"); - log(" highest optimization level that provides debug information for all\n"); - log(" public wires. currently, alias for -O%d.\n", OPT_LEVEL_DEBUG); + log(" like -O5, and inline public wires not marked (*keep*) if possible.\n"); log("\n"); log(" -g <level>\n"); log(" set the debug level. the default is -g%d. higher debug levels provide\n", DEFAULT_DEBUG_LEVEL); log(" more visibility and generate more code, but do not pessimize evaluation.\n"); log("\n"); log(" -g0\n"); - log(" no debug information.\n"); + log(" no debug information. the C API is disabled.\n"); log("\n"); log(" -g1\n"); - log(" debug information for non-optimized public wires. this also makes it\n"); - log(" possible to use the C API.\n"); + log(" include bare minimum of debug information necessary to access all design\n"); + log(" state. 
the C API is enabled.\n"); + log("\n"); + log(" -g2\n"); + log(" like -g1, but include debug information for all public wires that are\n"); + log(" directly accessible through the C++ interface.\n"); + log("\n"); + log(" -g3\n"); + log(" like -g2, and include debug information for public wires that are tied\n"); + log(" to a constant or another public wire.\n"); + log("\n"); + log(" -g4\n"); + log(" like -g3, and compute debug information on demand for all public wires\n"); + log(" that were optimized out.\n"); log("\n"); } @@ -2659,12 +3038,14 @@ struct CxxrtlBackend : public Backend { continue; } if (args[argidx] == "-Og") { - opt_level = OPT_LEVEL_DEBUG; + log_warning("The `-Og` option has been removed. Use `-g3` instead for complete " + "design coverage regardless of optimization level.\n"); continue; } if (args[argidx] == "-O" && argidx+1 < args.size() && args[argidx+1] == "g") { argidx++; - opt_level = OPT_LEVEL_DEBUG; + log_warning("The `-Og` option has been removed. Use `-g3` instead for complete " + "design coverage regardless of optimization level.\n"); continue; } if (args[argidx] == "-O" && argidx+1 < args.size()) { @@ -2701,7 +3082,7 @@ struct CxxrtlBackend : public Backend { switch (opt_level) { // the highest level here must match DEFAULT_OPT_LEVEL case 6: - worker.elide_public = true; + worker.inline_public = true; YS_FALLTHROUGH case 5: worker.localize_public = true; @@ -2710,7 +3091,7 @@ struct CxxrtlBackend : public Backend { worker.unbuffer_public = true; YS_FALLTHROUGH case 3: - worker.elide_internal = true; + worker.inline_internal = true; YS_FALLTHROUGH case 2: worker.localize_internal = true; @@ -2725,6 +3106,15 @@ struct CxxrtlBackend : public Backend { } switch (debug_level) { // the highest level here must match DEFAULT_DEBUG_LEVEL + case 4: + worker.debug_eval = true; + YS_FALLTHROUGH + case 3: + worker.debug_alias = true; + YS_FALLTHROUGH + case 2: + worker.debug_member = true; + YS_FALLTHROUGH case 1: worker.debug_info = true; YS_FALLTHROUGH diff --git a/backends/cxxrtl/cxxrtl_capi.cc b/backends/cxxrtl/cxxrtl_capi.cc index b77e4c491..227173ba8 100644 --- a/backends/cxxrtl/cxxrtl_capi.cc +++ b/backends/cxxrtl/cxxrtl_capi.cc @@ -32,9 +32,22 @@ const cxxrtl::debug_items &cxxrtl_debug_items_from_handle(cxxrtl_handle handle) } cxxrtl_handle cxxrtl_create(cxxrtl_toplevel design) { + return cxxrtl_create_at(design, ""); +} + +cxxrtl_handle cxxrtl_create_at(cxxrtl_toplevel design, const char *root) { + std::string path = root; + if (!path.empty()) { + // module::debug_info() accepts either an empty path, or a path ending in space to simplify + // the logic in generated code. While this is sketchy at best to expose in the C++ API, this + // would be a lot worse in the C API, so don't expose it here. 
+ assert(path.back() != ' '); + path += ' '; + } + cxxrtl_handle handle = new _cxxrtl_handle; handle->module = std::move(design->module); - handle->module->debug_info(handle->objects); + handle->module->debug_info(handle->objects, path); delete design; return handle; } @@ -43,6 +56,10 @@ void cxxrtl_destroy(cxxrtl_handle handle) { delete handle; } +void cxxrtl_reset(cxxrtl_handle handle) { + handle->module->reset(); +} + int cxxrtl_eval(cxxrtl_handle handle) { return handle->module->eval(); } @@ -69,3 +86,7 @@ void cxxrtl_enum(cxxrtl_handle handle, void *data, for (auto &it : handle->objects.table) callback(data, it.first.c_str(), static_cast<cxxrtl_object*>(&it.second[0]), it.second.size()); } + +void cxxrtl_outline_eval(cxxrtl_outline outline) { + outline->eval(); +} diff --git a/backends/cxxrtl/cxxrtl_capi.h b/backends/cxxrtl/cxxrtl_capi.h index 385d6dcf3..2df2b7287 100644 --- a/backends/cxxrtl/cxxrtl_capi.h +++ b/backends/cxxrtl/cxxrtl_capi.h @@ -52,9 +52,23 @@ typedef struct _cxxrtl_handle *cxxrtl_handle; // The `design` is consumed by this operation and cannot be used afterwards. cxxrtl_handle cxxrtl_create(cxxrtl_toplevel design); +// Create a design handle at a given hierarchy position from a design toplevel. +// +// This operation is similar to `cxxrtl_create`, except the full hierarchical name of every object +// is prepended with `root`. +cxxrtl_handle cxxrtl_create_at(cxxrtl_toplevel design, const char *root); + // Release all resources used by a design and its handle. void cxxrtl_destroy(cxxrtl_handle handle); +// Reinitialize the design, replacing the internal state with the reset values while preserving +// black boxes. +// +// This operation is essentially equivalent to a power-on reset. Values, wires, and memories are +// returned to their reset state while preserving the state of black boxes and keeping all of +// the interior pointers obtained with e.g. `cxxrtl_get` valid. +void cxxrtl_reset(cxxrtl_handle handle); + // Evaluate the design, propagating changes on inputs to the `next` value of internal state and // output wires. // @@ -114,6 +128,18 @@ enum cxxrtl_type { // pointer is always NULL. CXXRTL_ALIAS = 3, + // Outlines correspond to netlist nodes that were optimized in a way that makes them inaccessible + // outside of a module's `eval()` function. At the highest debug information level, every inlined + // node has a corresponding outline object. + // + // Outlines can be inspected via the `curr` pointer and can never be modified; the `next` pointer + // is always NULL. Unlike all other objects, the bits of an outline object are meaningful only + // after a call to `cxxrtl_outline_eval` and until any subsequent modification to the netlist. + // Observing this requirement is the responsibility of the caller; it is not enforced. + // + // Outlines always correspond to combinatorial netlist nodes that are not ports. + CXXRTL_OUTLINE = 4, + // More object types may be added in the future, but the existing ones will never change. }; @@ -157,8 +183,8 @@ enum cxxrtl_flag { // Node has bits that are driven by a combinatorial cell or another node. // - // This flag can be set on objects of type `CXXRTL_VALUE` and `CXXRTL_WIRE`. It may be combined - // with `CXXRTL_DRIVEN_SYNC` and `CXXRTL_UNDRIVEN`, as well as other flags. + // This flag can be set on objects of type `CXXRTL_VALUE`, `CXXRTL_WIRE`, and `CXXRTL_OUTLINE`. + // It may be combined with `CXXRTL_DRIVEN_SYNC` and `CXXRTL_UNDRIVEN`, as well as other flags. 
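Putting the new C API entry points together: a sketch of a host program, assuming the design was converted at -g4 and linked in, that it exposes the usual generated constructor cxxrtl_design_create(), and that it has a 1-bit input named clk and an optimized-out public wire named data — all of these names are assumptions, not part of the patch.

#include <assert.h>
#include <stdio.h>
#include <backends/cxxrtl/cxxrtl_capi.h>

// Provided by the generated code for the top module; the name follows the backend's
// usual convention, so check the generated file for the exact spelling.
extern "C" cxxrtl_toplevel cxxrtl_design_create();

// Run delta cycles until the design settles, as sketched earlier.
static void settle(cxxrtl_handle handle)
{
	int converged;
	do {
		converged = cxxrtl_eval(handle);
	} while (cxxrtl_commit(handle) && !converged);
}

int main(void)
{
	// Prefix every hierarchical name with "bench top".
	cxxrtl_handle handle = cxxrtl_create_at(cxxrtl_design_create(), "bench top");

	struct cxxrtl_object *clk  = cxxrtl_get(handle, "bench top clk");
	struct cxxrtl_object *data = cxxrtl_get(handle, "bench top data");
	assert(clk && data);

	for (int cycle = 0; cycle < 4; cycle++) {
		clk->next[0] = 1; settle(handle);
		clk->next[0] = 0; settle(handle);
		if (data->type == CXXRTL_OUTLINE)
			cxxrtl_outline_eval(data->outline); // refresh before reading; stale otherwise
		printf("cycle %d: data = %u\n", cycle, data->curr[0]);
	}

	cxxrtl_reset(handle);   // power-on reset; the `clk' and `data' pointers stay valid
	cxxrtl_destroy(handle);
	return 0;
}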
// // This flag is set on objects that have bits connected to the output of a combinatorial cell, // or directly to another node. For designs without combinatorial loops, writing to such bits @@ -179,8 +205,8 @@ enum cxxrtl_flag { // Description of a simulated object. // -// The `data` array can be accessed directly to inspect and, if applicable, modify the bits -// stored in the object. +// The `curr` and `next` arrays can be accessed directly to inspect and, if applicable, modify +// the bits stored in the object. struct cxxrtl_object { // Type of the object. // @@ -217,6 +243,12 @@ struct cxxrtl_object { uint32_t *curr; uint32_t *next; + // Opaque reference to an outline. Only meaningful for outline objects. + // + // See the documentation of `cxxrtl_outline` for details. When creating a `cxxrtl_object`, set + // this field to NULL. + struct _cxxrtl_outline *outline; + // More description fields may be added in the future, but the existing ones will never change. }; @@ -240,7 +272,7 @@ struct cxxrtl_object *cxxrtl_get_parts(cxxrtl_handle handle, const char *name, s // This function is a shortcut for the most common use of `cxxrtl_get_parts`. It asserts that, // if the object exists, it consists of a single part. If assertions are disabled, it returns NULL // for multi-part objects. -inline struct cxxrtl_object *cxxrtl_get(cxxrtl_handle handle, const char *name) { +static inline struct cxxrtl_object *cxxrtl_get(cxxrtl_handle handle, const char *name) { size_t parts = 0; struct cxxrtl_object *object = cxxrtl_get_parts(handle, name, &parts); assert(object == NULL || parts == 1); @@ -258,6 +290,20 @@ void cxxrtl_enum(cxxrtl_handle handle, void *data, void (*callback)(void *data, const char *name, struct cxxrtl_object *object, size_t parts)); +// Opaque reference to an outline. +// +// An outline is a group of outline objects that are evaluated simultaneously. The identity of +// an outline can be compared to determine whether any two objects belong to the same outline. +typedef struct _cxxrtl_outline *cxxrtl_outline; + +// Evaluate an outline. +// +// After evaluating an outline, the bits of every outline object contained in it are consistent +// with the current state of the netlist. In general, any further modification to the netlist +// causes every outline object to become stale, after which the corresponding outline must be +// re-evaluated, otherwise the bits read from that object are meaningless. 
+void cxxrtl_outline_eval(cxxrtl_outline outline); + #ifdef __cplusplus } #endif diff --git a/backends/cxxrtl/cxxrtl_vcd.h b/backends/cxxrtl/cxxrtl_vcd.h index dbeabbaf2..3f40a8d12 100644 --- a/backends/cxxrtl/cxxrtl_vcd.h +++ b/backends/cxxrtl/cxxrtl_vcd.h @@ -28,10 +28,13 @@ class vcd_writer { size_t ident; size_t width; chunk_t *curr; - size_t prev_off; + size_t cache_offset; + debug_outline *outline; + bool *outline_warm; }; std::vector<std::string> current_scope; + std::map<debug_outline*, bool> outlines; std::vector<variable> variables; std::vector<chunk_t> cache; std::map<chunk_t*, size_t> aliases; @@ -112,16 +115,22 @@ class vcd_writer { buffer += '\n'; } - const variable ®ister_variable(size_t width, chunk_t *curr, bool constant = false) { + void reset_outlines() { + for (auto &outline_it : outlines) + outline_it.second = /*warm=*/(outline_it.first == nullptr); + } + + variable ®ister_variable(size_t width, chunk_t *curr, bool constant = false, debug_outline *outline = nullptr) { if (aliases.count(curr)) { return variables[aliases[curr]]; } else { + auto outline_it = outlines.emplace(outline, /*warm=*/(outline == nullptr)).first; const size_t chunks = (width + (sizeof(chunk_t) * 8 - 1)) / (sizeof(chunk_t) * 8); aliases[curr] = variables.size(); if (constant) { - variables.emplace_back(variable { variables.size(), width, curr, (size_t)-1 }); + variables.emplace_back(variable { variables.size(), width, curr, (size_t)-1, outline_it->first, &outline_it->second }); } else { - variables.emplace_back(variable { variables.size(), width, curr, cache.size() }); + variables.emplace_back(variable { variables.size(), width, curr, cache.size(), outline_it->first, &outline_it->second }); cache.insert(cache.end(), &curr[0], &curr[chunks]); } return variables.back(); @@ -129,13 +138,17 @@ class vcd_writer { } bool test_variable(const variable &var) { - if (var.prev_off == (size_t)-1) + if (var.cache_offset == (size_t)-1) return false; // constant + if (!*var.outline_warm) { + var.outline->eval(); + *var.outline_warm = true; + } const size_t chunks = (var.width + (sizeof(chunk_t) * 8 - 1)) / (sizeof(chunk_t) * 8); - if (std::equal(&var.curr[0], &var.curr[chunks], &cache[var.prev_off])) { + if (std::equal(&var.curr[0], &var.curr[chunks], &cache[var.cache_offset])) { return false; } else { - std::copy(&var.curr[0], &var.curr[chunks], &cache[var.prev_off]); + std::copy(&var.curr[0], &var.curr[chunks], &cache[var.cache_offset]); return true; } } @@ -197,6 +210,10 @@ public: emit_var(register_variable(item.width, item.curr), "wire", name, item.lsb_at, multipart); break; + case debug_item::OUTLINE: + emit_var(register_variable(item.width, item.curr, /*constant=*/false, item.outline), + "wire", name, item.lsb_at, multipart); + break; } } @@ -211,13 +228,13 @@ public: } void add(const debug_items &items) { - this->template add(items, [](const std::string &, const debug_item &) { + this->add(items, [](const std::string &, const debug_item &) { return true; }); } void add_without_memories(const debug_items &items) { - this->template add(items, [](const std::string &, const debug_item &item) { + this->add(items, [](const std::string &, const debug_item &item) { return item.type != debug_item::MEMORY; }); } @@ -228,6 +245,7 @@ public: emit_scope({}); emit_enddefinitions(); } + reset_outlines(); emit_time(timestamp); for (auto var : variables) if (test_variable(var) || first_sample) { diff --git a/backends/spice/spice.cc b/backends/spice/spice.cc index aa20f106a..ca5c680c9 100644 --- a/backends/spice/spice.cc 
+++ b/backends/spice/spice.cc @@ -64,7 +64,7 @@ static void print_spice_net(std::ostream &f, RTLIL::SigBit s, std::string &neg, } } -static void print_spice_module(std::ostream &f, RTLIL::Module *module, RTLIL::Design *design, std::string &neg, std::string &pos, std::string &ncpf, bool big_endian, bool use_inames) +static void print_spice_module(std::ostream &f, RTLIL::Module *module, RTLIL::Design *design, std::string &neg, std::string &pos, std::string &buf, std::string &ncpf, bool big_endian, bool use_inames) { SigMap sigmap(module); idict<IdString, 1> inums; @@ -121,10 +121,10 @@ static void print_spice_module(std::ostream &f, RTLIL::Module *module, RTLIL::De for (auto &conn : module->connections()) for (int i = 0; i < conn.first.size(); i++) { - f << stringf("V%d", conn_counter++); - print_spice_net(f, conn.first.extract(i, 1), neg, pos, ncpf, nc_counter, use_inames, inums); + f << (buf == "DC" ? stringf("V%d", conn_counter++) : stringf("X%d", cell_counter++)); print_spice_net(f, conn.second.extract(i, 1), neg, pos, ncpf, nc_counter, use_inames, inums); - f << stringf(" DC 0\n"); + print_spice_net(f, conn.first.extract(i, 1), neg, pos, ncpf, nc_counter, use_inames, inums); + f << (buf == "DC" ? " DC 0\n" : stringf(" %s\n", buf.c_str())); } } @@ -148,6 +148,10 @@ struct SpiceBackend : public Backend { log(" -pos net_name\n"); log(" set the net name for constant 1 (default: Vdd)\n"); log("\n"); + log(" -buf DC|subckt_name\n"); + log(" set the name for jumper element (default: DC)\n"); + log(" (used to connect different nets)\n"); + log("\n"); log(" -nc_prefix\n"); log(" prefix for not-connected nets (default: _NC)\n"); log("\n"); @@ -164,7 +168,7 @@ struct SpiceBackend : public Backend { std::string top_module_name; RTLIL::Module *top_module = NULL; bool big_endian = false, use_inames = false; - std::string neg = "Vss", pos = "Vdd", ncpf = "_NC"; + std::string neg = "Vss", pos = "Vdd", ncpf = "_NC", buf = "DC"; log_header(design, "Executing SPICE backend.\n"); @@ -187,6 +191,10 @@ struct SpiceBackend : public Backend { pos = args[++argidx]; continue; } + if (args[argidx] == "-buf" && argidx+1 < args.size()) { + buf = args[++argidx]; + continue; + } if (args[argidx] == "-nc_prefix" && argidx+1 < args.size()) { ncpf = args[++argidx]; continue; @@ -241,14 +249,14 @@ struct SpiceBackend : public Backend { *f << stringf(" %s", spice_id2str(wire->name).c_str()); } *f << stringf("\n"); - print_spice_module(*f, module, design, neg, pos, ncpf, big_endian, use_inames); + print_spice_module(*f, module, design, neg, pos, buf, ncpf, big_endian, use_inames); *f << stringf(".ENDS %s\n\n", spice_id2str(module->name).c_str()); } if (!top_module_name.empty()) { if (top_module == NULL) log_error("Can't find top module `%s'!\n", top_module_name.c_str()); - print_spice_module(*f, top_module, design, neg, pos, ncpf, big_endian, use_inames); + print_spice_module(*f, top_module, design, neg, pos, buf, ncpf, big_endian, use_inames); *f << stringf("\n"); } |
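The -buf option changes how the jumpers between connected nets are emitted: with the default DC they remain zero-volt voltage sources, otherwise they become instances of a user-supplied subcircuit. A condensed sketch of that emission rule, with assumed stand-alone helpers in place of the backend's stringf/print_spice_net:

#include <cstdio>
#include <string>

// Emit one jumper tying `rhs' to `lhs', following the rule introduced above.
void emit_jumper(FILE *f, int &v_counter, int &x_counter,
                 const std::string &buf, const std::string &rhs, const std::string &lhs)
{
	if (buf == "DC")
		std::fprintf(f, "V%d %s %s DC 0\n", v_counter++, rhs.c_str(), lhs.c_str());
	else
		std::fprintf(f, "X%d %s %s %s\n", x_counter++, rhs.c_str(), lhs.c_str(), buf.c_str());
}

With `-buf BUF`, a connection that previously produced a line like `V0 n2 n1 DC 0` would instead produce something like `X0 n2 n1 BUF` (net names here are illustrative).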