author     gatecat <gatecat@ds0.me>  2022-04-08 13:42:54 +0100
committer  gatecat <gatecat@ds0.me>  2022-04-08 13:42:54 +0100
commit     49f178ed94b5fad00d25dbd12adea0bf4732f803 (patch)
tree       ea642e20bc07441a800944390e1f904e6ce5b113 /common/kernel
parent     e42e22575f20b59634f88b5cf694efdb413ff0a0 (diff)
Split up common into kernel,place,route
Signed-off-by: gatecat <gatecat@ds0.me>
Diffstat (limited to 'common/kernel')
-rw-r--r--  common/kernel/arch_api.h  158
-rw-r--r--  common/kernel/arch_pybindings_shared.h  147
-rw-r--r--  common/kernel/archcheck.cc  408
-rw-r--r--  common/kernel/base_arch.h  486
-rw-r--r--  common/kernel/base_clusterinfo.h  45
-rw-r--r--  common/kernel/basectx.cc  279
-rw-r--r--  common/kernel/basectx.h  243
-rw-r--r--  common/kernel/bits.cc  56
-rw-r--r--  common/kernel/bits.h  76
-rw-r--r--  common/kernel/chain_utils.h  69
-rw-r--r--  common/kernel/command.cc  563
-rw-r--r--  common/kernel/command.h  74
-rw-r--r--  common/kernel/constraints.h  70
-rw-r--r--  common/kernel/constraints.impl.h  109
-rw-r--r--  common/kernel/context.cc  428
-rw-r--r--  common/kernel/context.h  119
-rw-r--r--  common/kernel/design_utils.cc  52
-rw-r--r--  common/kernel/design_utils.h  100
-rw-r--r--  common/kernel/deterministic_rng.h  103
-rw-r--r--  common/kernel/dynamic_bitarray.h  211
-rw-r--r--  common/kernel/embed.cc  49
-rw-r--r--  common/kernel/embed.h  49
-rw-r--r--  common/kernel/exclusive_state_groups.h  154
-rw-r--r--  common/kernel/exclusive_state_groups.impl.h  89
-rw-r--r--  common/kernel/handle_error.cc  61
-rw-r--r--  common/kernel/hashlib.h  1210
-rw-r--r--  common/kernel/idstring.cc  51
-rw-r--r--  common/kernel/idstring.h  75
-rw-r--r--  common/kernel/idstringlist.cc  80
-rw-r--r--  common/kernel/idstringlist.h  87
-rw-r--r--  common/kernel/indexed_store.h  297
-rw-r--r--  common/kernel/log.cc  198
-rw-r--r--  common/kernel/log.h  92
-rw-r--r--  common/kernel/nextpnr.cc  35
-rw-r--r--  common/kernel/nextpnr.h  29
-rw-r--r--  common/kernel/nextpnr_assertions.cc  33
-rw-r--r--  common/kernel/nextpnr_assertions.h  64
-rw-r--r--  common/kernel/nextpnr_base_types.h  135
-rw-r--r--  common/kernel/nextpnr_namespaces.cc  23
-rw-r--r--  common/kernel/nextpnr_namespaces.h  58
-rw-r--r--  common/kernel/nextpnr_types.cc  180
-rw-r--r--  common/kernel/nextpnr_types.h  364
-rw-r--r--  common/kernel/property.cc  80
-rw-r--r--  common/kernel/property.h  131
-rw-r--r--  common/kernel/pybindings.cc  362
-rw-r--r--  common/kernel/pybindings.h  93
-rw-r--r--  common/kernel/pycontainers.h  575
-rw-r--r--  common/kernel/pywrappers.h  463
-rw-r--r--  common/kernel/relptr.h  74
-rw-r--r--  common/kernel/report.cc  259
-rw-r--r--  common/kernel/scope_lock.h  67
-rw-r--r--  common/kernel/sdf.cc  334
-rw-r--r--  common/kernel/sso_array.h  132
-rw-r--r--  common/kernel/str_ring_buffer.cc  34
-rw-r--r--  common/kernel/str_ring_buffer.h  45
-rw-r--r--  common/kernel/svg.cc  152
-rw-r--r--  common/kernel/timing.cc  1515
-rw-r--r--  common/kernel/timing.h  236
-rw-r--r--  common/kernel/util.h  241
59 files changed, 12002 insertions, 0 deletions
diff --git a/common/kernel/arch_api.h b/common/kernel/arch_api.h
new file mode 100644
index 00000000..14a30652
--- /dev/null
+++ b/common/kernel/arch_api.h
@@ -0,0 +1,158 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef ARCH_API_H
+#define ARCH_API_H
+
+#include <algorithm>
+
+#include "basectx.h"
+#include "idstring.h"
+#include "idstringlist.h"
+#include "nextpnr_assertions.h"
+#include "nextpnr_namespaces.h"
+#include "nextpnr_types.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+// The specification of the Arch API (pure virtual)
+template <typename R> struct ArchAPI : BaseCtx
+{
+ // Basic config
+ virtual IdString archId() const = 0;
+ virtual std::string getChipName() const = 0;
+ virtual typename R::ArchArgsT archArgs() const = 0;
+ virtual IdString archArgsToId(typename R::ArchArgsT args) const = 0;
+ virtual int getGridDimX() const = 0;
+ virtual int getGridDimY() const = 0;
+ virtual int getTileBelDimZ(int x, int y) const = 0;
+ virtual int getTilePipDimZ(int x, int y) const = 0;
+ virtual char getNameDelimiter() const = 0;
+ // Bel methods
+ virtual typename R::AllBelsRangeT getBels() const = 0;
+ virtual IdStringList getBelName(BelId bel) const = 0;
+ virtual BelId getBelByName(IdStringList name) const = 0;
+ virtual uint32_t getBelChecksum(BelId bel) const = 0;
+ virtual void bindBel(BelId bel, CellInfo *cell, PlaceStrength strength) = 0;
+ virtual void unbindBel(BelId bel) = 0;
+ virtual Loc getBelLocation(BelId bel) const = 0;
+ virtual BelId getBelByLocation(Loc loc) const = 0;
+ virtual typename R::TileBelsRangeT getBelsByTile(int x, int y) const = 0;
+ virtual bool getBelGlobalBuf(BelId bel) const = 0;
+ virtual bool checkBelAvail(BelId bel) const = 0;
+ virtual CellInfo *getBoundBelCell(BelId bel) const = 0;
+ virtual CellInfo *getConflictingBelCell(BelId bel) const = 0;
+ virtual IdString getBelType(BelId bel) const = 0;
+ virtual bool getBelHidden(BelId bel) const = 0;
+ virtual typename R::BelAttrsRangeT getBelAttrs(BelId bel) const = 0;
+ virtual WireId getBelPinWire(BelId bel, IdString pin) const = 0;
+ virtual PortType getBelPinType(BelId bel, IdString pin) const = 0;
+ virtual typename R::BelPinsRangeT getBelPins(BelId bel) const = 0;
+ virtual typename R::CellBelPinRangeT getBelPinsForCellPin(const CellInfo *cell_info, IdString pin) const = 0;
+ // Wire methods
+ virtual typename R::AllWiresRangeT getWires() const = 0;
+ virtual WireId getWireByName(IdStringList name) const = 0;
+ virtual IdStringList getWireName(WireId wire) const = 0;
+ virtual IdString getWireType(WireId wire) const = 0;
+ virtual typename R::WireAttrsRangeT getWireAttrs(WireId) const = 0;
+ virtual typename R::DownhillPipRangeT getPipsDownhill(WireId wire) const = 0;
+ virtual typename R::UphillPipRangeT getPipsUphill(WireId wire) const = 0;
+ virtual typename R::WireBelPinRangeT getWireBelPins(WireId wire) const = 0;
+ virtual uint32_t getWireChecksum(WireId wire) const = 0;
+ virtual void bindWire(WireId wire, NetInfo *net, PlaceStrength strength) = 0;
+ virtual void unbindWire(WireId wire) = 0;
+ virtual bool checkWireAvail(WireId wire) const = 0;
+ virtual NetInfo *getBoundWireNet(WireId wire) const = 0;
+ virtual WireId getConflictingWireWire(WireId wire) const = 0;
+ virtual NetInfo *getConflictingWireNet(WireId wire) const = 0;
+ virtual DelayQuad getWireDelay(WireId wire) const = 0;
+ // Pip methods
+ virtual typename R::AllPipsRangeT getPips() const = 0;
+ virtual PipId getPipByName(IdStringList name) const = 0;
+ virtual IdStringList getPipName(PipId pip) const = 0;
+ virtual IdString getPipType(PipId pip) const = 0;
+ virtual typename R::PipAttrsRangeT getPipAttrs(PipId) const = 0;
+ virtual uint32_t getPipChecksum(PipId pip) const = 0;
+ virtual void bindPip(PipId pip, NetInfo *net, PlaceStrength strength) = 0;
+ virtual void unbindPip(PipId pip) = 0;
+ virtual bool checkPipAvail(PipId pip) const = 0;
+ virtual bool checkPipAvailForNet(PipId pip, NetInfo *net) const = 0;
+ virtual NetInfo *getBoundPipNet(PipId pip) const = 0;
+ virtual WireId getConflictingPipWire(PipId pip) const = 0;
+ virtual NetInfo *getConflictingPipNet(PipId pip) const = 0;
+ virtual WireId getPipSrcWire(PipId pip) const = 0;
+ virtual WireId getPipDstWire(PipId pip) const = 0;
+ virtual DelayQuad getPipDelay(PipId pip) const = 0;
+ virtual Loc getPipLocation(PipId pip) const = 0;
+ // Group methods
+ virtual GroupId getGroupByName(IdStringList name) const = 0;
+ virtual IdStringList getGroupName(GroupId group) const = 0;
+ virtual typename R::AllGroupsRangeT getGroups() const = 0;
+ virtual typename R::GroupBelsRangeT getGroupBels(GroupId group) const = 0;
+ virtual typename R::GroupWiresRangeT getGroupWires(GroupId group) const = 0;
+ virtual typename R::GroupPipsRangeT getGroupPips(GroupId group) const = 0;
+ virtual typename R::GroupGroupsRangeT getGroupGroups(GroupId group) const = 0;
+ // Delay Methods
+ virtual delay_t predictDelay(BelId src_bel, IdString src_pin, BelId dst_bel, IdString dst_pin) const = 0;
+ virtual delay_t getDelayEpsilon() const = 0;
+ virtual delay_t getRipupDelayPenalty() const = 0;
+ virtual float getDelayNS(delay_t v) const = 0;
+ virtual delay_t getDelayFromNS(float ns) const = 0;
+ virtual uint32_t getDelayChecksum(delay_t v) const = 0;
+ virtual bool getBudgetOverride(const NetInfo *net_info, const PortRef &sink, delay_t &budget) const = 0;
+ virtual delay_t estimateDelay(WireId src, WireId dst) const = 0;
+ virtual ArcBounds getRouteBoundingBox(WireId src, WireId dst) const = 0;
+ // Decal methods
+ virtual typename R::DecalGfxRangeT getDecalGraphics(DecalId decal) const = 0;
+ virtual DecalXY getBelDecal(BelId bel) const = 0;
+ virtual DecalXY getWireDecal(WireId wire) const = 0;
+ virtual DecalXY getPipDecal(PipId pip) const = 0;
+ virtual DecalXY getGroupDecal(GroupId group) const = 0;
+ // Cell timing methods
+ virtual bool getCellDelay(const CellInfo *cell, IdString fromPort, IdString toPort, DelayQuad &delay) const = 0;
+ virtual TimingPortClass getPortTimingClass(const CellInfo *cell, IdString port, int &clockInfoCount) const = 0;
+ virtual TimingClockingInfo getPortClockingInfo(const CellInfo *cell, IdString port, int index) const = 0;
+ // Placement validity checks
+ virtual bool isValidBelForCellType(IdString cell_type, BelId bel) const = 0;
+ virtual IdString getBelBucketName(BelBucketId bucket) const = 0;
+ virtual BelBucketId getBelBucketByName(IdString name) const = 0;
+ virtual BelBucketId getBelBucketForBel(BelId bel) const = 0;
+ virtual BelBucketId getBelBucketForCellType(IdString cell_type) const = 0;
+ virtual bool isBelLocationValid(BelId bel) const = 0;
+ virtual typename R::CellTypeRangeT getCellTypes() const = 0;
+ virtual typename R::BelBucketRangeT getBelBuckets() const = 0;
+ virtual typename R::BucketBelRangeT getBelsInBucket(BelBucketId bucket) const = 0;
+ // Cluster methods
+ virtual CellInfo *getClusterRootCell(ClusterId cluster) const = 0;
+ virtual ArcBounds getClusterBounds(ClusterId cluster) const = 0;
+ virtual Loc getClusterOffset(const CellInfo *cell) const = 0;
+ virtual bool isClusterStrict(const CellInfo *cell) const = 0;
+ virtual bool getClusterPlacement(ClusterId cluster, BelId root_bel,
+ std::vector<std::pair<CellInfo *, BelId>> &placement) const = 0;
+ // Flow methods
+ virtual bool pack() = 0;
+ virtual bool place() = 0;
+ virtual bool route() = 0;
+ virtual void assignArchInfo() = 0;
+};
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* ARCH_API_H */
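(For orientation: placement and routing code drives this interface generically through the pure-virtual methods above. A minimal sketch of such a caller follows; the helper name and the ranges struct R are hypothetical, not part of this commit.)

    // Hypothetical helper: bind a cell to the first free bel of its type.
    template <typename R>
    void bind_first_free_bel(ArchAPI<R> &arch, CellInfo *cell, PlaceStrength strength)
    {
        for (BelId bel : arch.getBels()) {
            if (arch.getBelType(bel) != cell->type || !arch.checkBelAvail(bel))
                continue;
            arch.bindBel(bel, cell, strength);
            return;
        }
    }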
diff --git a/common/kernel/arch_pybindings_shared.h b/common/kernel/arch_pybindings_shared.h
new file mode 100644
index 00000000..b3dc0506
--- /dev/null
+++ b/common/kernel/arch_pybindings_shared.h
@@ -0,0 +1,147 @@
+// Common Python bindings #included by all arches
+
+readonly_wrapper<Context, decltype(&Context::cells), &Context::cells, wrap_context<CellMap &>>::def_wrap(ctx_cls,
+ "cells");
+readonly_wrapper<Context, decltype(&Context::nets), &Context::nets, wrap_context<NetMap &>>::def_wrap(ctx_cls, "nets");
+readonly_wrapper<Context, decltype(&Context::net_aliases), &Context::net_aliases, wrap_context<AliasMap &>>::def_wrap(
+ ctx_cls, "net_aliases");
+readonly_wrapper<Context, decltype(&Context::hierarchy), &Context::hierarchy, wrap_context<HierarchyMap &>>::def_wrap(
+ ctx_cls, "hierarchy");
+readwrite_wrapper<Context, decltype(&Context::top_module), &Context::top_module, conv_to_str<IdString>,
+ conv_from_str<IdString>>::def_wrap(ctx_cls, "top_module");
+readonly_wrapper<Context, decltype(&Context::timing_result), &Context::timing_result,
+ wrap_context<TimingResult &>>::def_wrap(ctx_cls, "timing_result");
+
+fn_wrapper_0a<Context, decltype(&Context::getNameDelimiter), &Context::getNameDelimiter, pass_through<char>>::def_wrap(
+ ctx_cls, "getNameDelimiter");
+
+fn_wrapper_1a<Context, decltype(&Context::getNetByAlias), &Context::getNetByAlias, deref_and_wrap<NetInfo>,
+ conv_from_str<IdString>>::def_wrap(ctx_cls, "getNetByAlias");
+fn_wrapper_2a_v<Context, decltype(&Context::addClock), &Context::addClock, conv_from_str<IdString>,
+ pass_through<float>>::def_wrap(ctx_cls, "addClock");
+fn_wrapper_5a_v<Context, decltype(&Context::createRectangularRegion), &Context::createRectangularRegion,
+ conv_from_str<IdString>, pass_through<int>, pass_through<int>, pass_through<int>,
+ pass_through<int>>::def_wrap(ctx_cls, "createRectangularRegion");
+fn_wrapper_2a_v<Context, decltype(&Context::addBelToRegion), &Context::addBelToRegion, conv_from_str<IdString>,
+ conv_from_str<BelId>>::def_wrap(ctx_cls, "addBelToRegion");
+fn_wrapper_2a_v<Context, decltype(&Context::constrainCellToRegion), &Context::constrainCellToRegion,
+ conv_from_str<IdString>, conv_from_str<IdString>>::def_wrap(ctx_cls, "constrainCellToRegion");
+
+fn_wrapper_2a<Context, decltype(&Context::getNetinfoRouteDelay), &Context::getNetinfoRouteDelay, pass_through<delay_t>,
+ addr_and_unwrap<NetInfo>, unwrap_context<PortRef &>>::def_wrap(ctx_cls, "getNetinfoRouteDelay");
+
+fn_wrapper_1a<Context, decltype(&Context::createNet), &Context::createNet, deref_and_wrap<NetInfo>,
+ conv_from_str<IdString>>::def_wrap(ctx_cls, "createNet");
+fn_wrapper_3a_v<Context, decltype(&Context::connectPort), &Context::connectPort, conv_from_str<IdString>,
+ conv_from_str<IdString>, conv_from_str<IdString>>::def_wrap(ctx_cls, "connectPort");
+fn_wrapper_2a_v<Context, decltype(&Context::disconnectPort), &Context::disconnectPort, conv_from_str<IdString>,
+ conv_from_str<IdString>>::def_wrap(ctx_cls, "disconnectPort");
+fn_wrapper_1a_v<Context, decltype(&Context::ripupNet), &Context::ripupNet, conv_from_str<IdString>>::def_wrap(
+ ctx_cls, "ripupNet");
+fn_wrapper_1a_v<Context, decltype(&Context::lockNetRouting), &Context::lockNetRouting,
+ conv_from_str<IdString>>::def_wrap(ctx_cls, "lockNetRouting");
+
+fn_wrapper_2a<Context, decltype(&Context::createCell), &Context::createCell, deref_and_wrap<CellInfo>,
+ conv_from_str<IdString>, conv_from_str<IdString>>::def_wrap(ctx_cls, "createCell");
+fn_wrapper_2a_v<Context, decltype(&Context::copyBelPorts), &Context::copyBelPorts, conv_from_str<IdString>,
+ conv_from_str<BelId>>::def_wrap(ctx_cls, "copyBelPorts");
+
+fn_wrapper_1a<Context, decltype(&Context::getBelType), &Context::getBelType, conv_to_str<IdString>,
+ conv_from_str<BelId>>::def_wrap(ctx_cls, "getBelType");
+fn_wrapper_1a<Context, decltype(&Context::getBelLocation), &Context::getBelLocation, pass_through<Loc>,
+ conv_from_str<BelId>>::def_wrap(ctx_cls, "getBelLocation");
+fn_wrapper_1a<Context, decltype(&Context::checkBelAvail), &Context::checkBelAvail, pass_through<bool>,
+ conv_from_str<BelId>>::def_wrap(ctx_cls, "checkBelAvail");
+fn_wrapper_1a<Context, decltype(&Context::getBelChecksum), &Context::getBelChecksum, pass_through<uint32_t>,
+ conv_from_str<BelId>>::def_wrap(ctx_cls, "getBelChecksum");
+fn_wrapper_3a_v<Context, decltype(&Context::bindBel), &Context::bindBel, conv_from_str<BelId>,
+ addr_and_unwrap<CellInfo>, pass_through<PlaceStrength>>::def_wrap(ctx_cls, "bindBel");
+fn_wrapper_1a_v<Context, decltype(&Context::unbindBel), &Context::unbindBel, conv_from_str<BelId>>::def_wrap(
+ ctx_cls, "unbindBel");
+fn_wrapper_1a<Context, decltype(&Context::getBoundBelCell), &Context::getBoundBelCell, deref_and_wrap<CellInfo>,
+ conv_from_str<BelId>>::def_wrap(ctx_cls, "getBoundBelCell");
+fn_wrapper_1a<Context, decltype(&Context::getConflictingBelCell), &Context::getConflictingBelCell,
+ deref_and_wrap<CellInfo>, conv_from_str<BelId>>::def_wrap(ctx_cls, "getConflictingBelCell");
+fn_wrapper_0a<Context, decltype(&Context::getBels), &Context::getBels, wrap_context<BelRange>>::def_wrap(ctx_cls,
+ "getBels");
+
+fn_wrapper_2a<Context, decltype(&Context::getBelPinWire), &Context::getBelPinWire, conv_to_str<WireId>,
+ conv_from_str<BelId>, conv_from_str<IdString>>::def_wrap(ctx_cls, "getBelPinWire");
+fn_wrapper_2a<Context, decltype(&Context::getBelPinType), &Context::getBelPinType, pass_through<PortType>,
+ conv_from_str<BelId>, conv_from_str<IdString>>::def_wrap(ctx_cls, "getBelPinType");
+fn_wrapper_1a<Context, decltype(&Context::getWireBelPins), &Context::getWireBelPins, wrap_context<BelPinRange>,
+ conv_from_str<WireId>>::def_wrap(ctx_cls, "getWireBelPins");
+
+fn_wrapper_1a<Context, decltype(&Context::getWireChecksum), &Context::getWireChecksum, pass_through<uint32_t>,
+ conv_from_str<WireId>>::def_wrap(ctx_cls, "getWireChecksum");
+fn_wrapper_1a<Context, decltype(&Context::getWireType), &Context::getWireType, conv_to_str<IdString>,
+ conv_from_str<WireId>>::def_wrap(ctx_cls, "getWireType");
+fn_wrapper_3a_v<Context, decltype(&Context::bindWire), &Context::bindWire, conv_from_str<WireId>,
+ addr_and_unwrap<NetInfo>, pass_through<PlaceStrength>>::def_wrap(ctx_cls, "bindWire");
+fn_wrapper_1a_v<Context, decltype(&Context::unbindWire), &Context::unbindWire, conv_from_str<WireId>>::def_wrap(
+ ctx_cls, "unbindWire");
+fn_wrapper_1a<Context, decltype(&Context::checkWireAvail), &Context::checkWireAvail, pass_through<bool>,
+ conv_from_str<WireId>>::def_wrap(ctx_cls, "checkWireAvail");
+fn_wrapper_1a<Context, decltype(&Context::getBoundWireNet), &Context::getBoundWireNet, deref_and_wrap<NetInfo>,
+ conv_from_str<WireId>>::def_wrap(ctx_cls, "getBoundWireNet");
+fn_wrapper_1a<Context, decltype(&Context::getConflictingWireNet), &Context::getConflictingWireNet,
+ deref_and_wrap<NetInfo>, conv_from_str<WireId>>::def_wrap(ctx_cls, "getConflictingWireNet");
+
+fn_wrapper_0a<Context, decltype(&Context::getWires), &Context::getWires, wrap_context<WireRange>>::def_wrap(ctx_cls,
+ "getWires");
+
+fn_wrapper_0a<Context, decltype(&Context::getPips), &Context::getPips, wrap_context<AllPipRange>>::def_wrap(ctx_cls,
+ "getPips");
+fn_wrapper_1a<Context, decltype(&Context::getPipChecksum), &Context::getPipChecksum, pass_through<uint32_t>,
+ conv_from_str<PipId>>::def_wrap(ctx_cls, "getPipChecksum");
+fn_wrapper_1a<Context, decltype(&Context::getPipLocation), &Context::getPipLocation, pass_through<Loc>,
+ conv_from_str<PipId>>::def_wrap(ctx_cls, "getPipLocation");
+fn_wrapper_3a_v<Context, decltype(&Context::bindPip), &Context::bindPip, conv_from_str<PipId>, addr_and_unwrap<NetInfo>,
+ pass_through<PlaceStrength>>::def_wrap(ctx_cls, "bindPip");
+fn_wrapper_1a_v<Context, decltype(&Context::unbindPip), &Context::unbindPip, conv_from_str<PipId>>::def_wrap(
+ ctx_cls, "unbindPip");
+fn_wrapper_1a<Context, decltype(&Context::checkPipAvail), &Context::checkPipAvail, pass_through<bool>,
+ conv_from_str<PipId>>::def_wrap(ctx_cls, "checkPipAvail");
+fn_wrapper_1a<Context, decltype(&Context::getBoundPipNet), &Context::getBoundPipNet, deref_and_wrap<NetInfo>,
+ conv_from_str<PipId>>::def_wrap(ctx_cls, "getBoundPipNet");
+fn_wrapper_1a<Context, decltype(&Context::getConflictingPipNet), &Context::getConflictingPipNet,
+ deref_and_wrap<NetInfo>, conv_from_str<PipId>>::def_wrap(ctx_cls, "getConflictingPipNet");
+
+fn_wrapper_1a<Context, decltype(&Context::getPipsDownhill), &Context::getPipsDownhill, wrap_context<DownhillPipRange>,
+ conv_from_str<WireId>>::def_wrap(ctx_cls, "getPipsDownhill");
+fn_wrapper_1a<Context, decltype(&Context::getPipsUphill), &Context::getPipsUphill, wrap_context<UphillPipRange>,
+ conv_from_str<WireId>>::def_wrap(ctx_cls, "getPipsUphill");
+
+fn_wrapper_1a<Context, decltype(&Context::getPipSrcWire), &Context::getPipSrcWire, conv_to_str<WireId>,
+ conv_from_str<PipId>>::def_wrap(ctx_cls, "getPipSrcWire");
+fn_wrapper_1a<Context, decltype(&Context::getPipDstWire), &Context::getPipDstWire, conv_to_str<WireId>,
+ conv_from_str<PipId>>::def_wrap(ctx_cls, "getPipDstWire");
+fn_wrapper_1a<Context, decltype(&Context::getPipDelay), &Context::getPipDelay, pass_through<DelayQuad>,
+ conv_from_str<PipId>>::def_wrap(ctx_cls, "getPipDelay");
+
+fn_wrapper_0a<Context, decltype(&Context::getChipName), &Context::getChipName, pass_through<std::string>>::def_wrap(
+ ctx_cls, "getChipName");
+fn_wrapper_0a<Context, decltype(&Context::archId), &Context::archId, conv_to_str<IdString>>::def_wrap(ctx_cls,
+ "archId");
+
+fn_wrapper_2a_v<Context, decltype(&Context::writeSVG), &Context::writeSVG, pass_through<std::string>,
+ pass_through<std::string>>::def_wrap(ctx_cls, "writeSVG");
+
+fn_wrapper_1a<Context, decltype(&Context::isBelLocationValid), &Context::isBelLocationValid, pass_through<bool>,
+ conv_from_str<BelId>>::def_wrap(ctx_cls, "isBelLocationValid");
+
+// const_range<BelBucketId> getBelBuckets() const
+fn_wrapper_0a<Context, decltype(&Context::getBelBuckets), &Context::getBelBuckets,
+ wrap_context<BelBucketRange>>::def_wrap(ctx_cls, "getBelBuckets");
+// BelBucketId getBelBucketForBel(BelId bel) const
+fn_wrapper_1a<Context, decltype(&Context::getBelBucketForBel), &Context::getBelBucketForBel, conv_to_str<BelBucketId>,
+ conv_from_str<BelId>>::def_wrap(ctx_cls, "getBelBucketForBel");
+// BelBucketId getBelBucketForCellType(IdString cell_type) const
+fn_wrapper_1a<Context, decltype(&Context::getBelBucketForCellType), &Context::getBelBucketForCellType,
+ conv_to_str<BelBucketId>, conv_from_str<IdString>>::def_wrap(ctx_cls, "getBelBucketForCellType");
+// const_range<BelId> getBelsInBucket(BelBucketId bucket) const
+fn_wrapper_1a<Context, decltype(&Context::getBelsInBucket), &Context::getBelsInBucket,
+ wrap_context<BelRangeForBelBucket>, conv_from_str<BelBucketId>>::def_wrap(ctx_cls, "getBelsInBucket");
+// bool isValidBelForCellType(IdString cell_type, BelId bel) const
+fn_wrapper_2a<Context, decltype(&Context::isValidBelForCellType), &Context::isValidBelForCellType, pass_through<bool>,
+ conv_from_str<IdString>, conv_from_str<BelId>>::def_wrap(ctx_cls, "isValidBelForCellType");
diff --git a/common/kernel/archcheck.cc b/common/kernel/archcheck.cc
new file mode 100644
index 00000000..23ec7aee
--- /dev/null
+++ b/common/kernel/archcheck.cc
@@ -0,0 +1,408 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "log.h"
+#include "nextpnr.h"
+
+#if 0
+#define dbg(...) log(__VA_ARGS__)
+#else
+#define dbg(...)
+#endif
+
+USING_NEXTPNR_NAMESPACE
+
+#ifndef ARCH_MISTRAL
+// The LRU cache used to reduce memory usage during the connectivity check relies on getPips() having some spatial
+// locality, which the current CycloneV arch impl doesn't have. This may be fixed in the future, though.
+#define USING_LRU_CACHE
+#endif
+
+namespace {
+
+void archcheck_names(const Context *ctx)
+{
+ log_info("Checking entity names.\n");
+
+ log_info("Checking bel names..\n");
+ for (BelId bel : ctx->getBels()) {
+ IdStringList name = ctx->getBelName(bel);
+ BelId bel2 = ctx->getBelByName(name);
+ if (bel != bel2) {
+ log_error("bel != bel2, name = %s\n", ctx->nameOfBel(bel));
+ }
+ }
+
+ log_info("Checking wire names..\n");
+ for (WireId wire : ctx->getWires()) {
+ IdStringList name = ctx->getWireName(wire);
+ WireId wire2 = ctx->getWireByName(name);
+ if (wire != wire2) {
+ log_error("wire != wire2, name = %s\n", ctx->nameOfWire(wire));
+ }
+ }
+
+ log_info("Checking bucket names..\n");
+ for (BelBucketId bucket : ctx->getBelBuckets()) {
+ IdString name = ctx->getBelBucketName(bucket);
+ BelBucketId bucket2 = ctx->getBelBucketByName(name);
+ if (bucket != bucket2) {
+ log_error("bucket != bucket2, name = %s\n", name.c_str(ctx));
+ }
+ }
+
+#ifndef ARCH_ECP5
+ log_info("Checking pip names..\n");
+ for (PipId pip : ctx->getPips()) {
+ IdStringList name = ctx->getPipName(pip);
+ PipId pip2 = ctx->getPipByName(name);
+ if (pip != pip2) {
+ log_error("pip != pip2, name = %s\n", ctx->nameOfPip(pip));
+ }
+ }
+#endif
+ log_break();
+}
+
+void archcheck_locs(const Context *ctx)
+{
+ log_info("Checking location data.\n");
+
+ log_info("Checking all bels..\n");
+ for (BelId bel : ctx->getBels()) {
+ log_assert(bel != BelId());
+ dbg("> %s\n", ctx->getBelName(bel).c_str(ctx));
+
+ Loc loc = ctx->getBelLocation(bel);
+ dbg(" ... %d %d %d\n", loc.x, loc.y, loc.z);
+
+ log_assert(0 <= loc.x);
+ log_assert(0 <= loc.y);
+ log_assert(0 <= loc.z);
+ log_assert(loc.x < ctx->getGridDimX());
+ log_assert(loc.y < ctx->getGridDimY());
+ log_assert(loc.z < ctx->getTileBelDimZ(loc.x, loc.y));
+
+ BelId bel2 = ctx->getBelByLocation(loc);
+ dbg(" ... %s\n", ctx->getBelName(bel2).c_str(ctx));
+ log_assert(bel == bel2);
+ }
+
+ log_info("Checking all locations..\n");
+ for (int x = 0; x < ctx->getGridDimX(); x++)
+ for (int y = 0; y < ctx->getGridDimY(); y++) {
+ dbg("> %d %d\n", x, y);
+ pool<int> usedz;
+
+ for (int z = 0; z < ctx->getTileBelDimZ(x, y); z++) {
+ BelId bel = ctx->getBelByLocation(Loc(x, y, z));
+ if (bel == BelId())
+ continue;
+ Loc loc = ctx->getBelLocation(bel);
+ dbg(" + %d %s\n", z, ctx->nameOfBel(bel));
+ log_assert(x == loc.x);
+ log_assert(y == loc.y);
+ log_assert(z == loc.z);
+ usedz.insert(z);
+ }
+
+ for (BelId bel : ctx->getBelsByTile(x, y)) {
+ Loc loc = ctx->getBelLocation(bel);
+ dbg(" - %d %s\n", loc.z, ctx->nameOfBel(bel));
+ log_assert(x == loc.x);
+ log_assert(y == loc.y);
+ log_assert(usedz.count(loc.z));
+ usedz.erase(loc.z);
+ }
+
+ log_assert(usedz.empty());
+ }
+
+ log_break();
+}
+
+// Implements an LRU cache for pip -> wire lookups via getPipsDownhill/getPipsUphill.
+//
+// This provides a fast way to check getPipsDownhill/getPipsUphill from getPips,
+// without ballooning memory usage.
+struct LruWireCacheMap
+{
+ LruWireCacheMap(const Context *ctx, size_t cache_size) : ctx(ctx), cache_size(cache_size)
+ {
+ cache_hits = 0;
+ cache_misses = 0;
+ cache_evictions = 0;
+ }
+
+ const Context *ctx;
+ size_t cache_size;
+
+ // Cache stats for checking on cache behavior.
+ size_t cache_hits;
+ size_t cache_misses;
+ size_t cache_evictions;
+
+    // Most recently accessed wires are added to the back of the list; the
+    // front of the list is the oldest wire in the cache.
+ std::list<WireId> last_access_list;
+ // Quick wire -> list element lookup.
+ dict<WireId, std::list<WireId>::iterator> last_access_map;
+
+ dict<PipId, WireId> pips_downhill;
+ dict<PipId, WireId> pips_uphill;
+
+ void removeWireFromCache(WireId wire_to_remove)
+ {
+ for (PipId pip : ctx->getPipsDownhill(wire_to_remove)) {
+ log_assert(pips_downhill.erase(pip) == 1);
+ }
+
+ for (PipId pip : ctx->getPipsUphill(wire_to_remove)) {
+ log_assert(pips_uphill.erase(pip) == 1);
+ }
+ }
+
+ void addWireToCache(WireId wire)
+ {
+ for (PipId pip : ctx->getPipsDownhill(wire)) {
+ auto result = pips_downhill.emplace(pip, wire);
+ log_assert(result.second);
+ }
+
+ for (PipId pip : ctx->getPipsUphill(wire)) {
+ auto result = pips_uphill.emplace(pip, wire);
+ log_assert(result.second);
+ }
+ }
+
+ void populateCache(WireId wire)
+ {
+ // Put this wire at the end of last_access_list.
+ auto iter = last_access_list.emplace(last_access_list.end(), wire);
+ last_access_map.emplace(wire, iter);
+
+ if (last_access_list.size() > cache_size) {
+ // Cache is full, remove front of last_access_list.
+ cache_evictions += 1;
+ WireId wire_to_remove = last_access_list.front();
+ last_access_list.pop_front();
+ log_assert(last_access_map.erase(wire_to_remove) == 1);
+
+ removeWireFromCache(wire_to_remove);
+ }
+
+ addWireToCache(wire);
+ }
+
+    // Determines whether the wire is in the cache. If it is not, adds the
+    // wire to the cache, potentially evicting the oldest wire if the cache
+    // is now full.
+ void checkCache(WireId wire)
+ {
+ auto iter = last_access_map.find(wire);
+ if (iter == last_access_map.end()) {
+ cache_misses += 1;
+ populateCache(wire);
+ } else {
+ // Record that this wire has been accessed.
+ cache_hits += 1;
+ last_access_list.splice(last_access_list.end(), last_access_list, iter->second);
+ }
+ }
+
+    // Returns true if pip is uphill of wire (i.e. pip is in getPipsUphill(wire)).
+ bool isPipUphill(PipId pip, WireId wire)
+ {
+ checkCache(wire);
+ return pips_uphill.at(pip) == wire;
+ }
+
+    // Returns true if pip is downhill of wire (i.e. pip is in getPipsDownhill(wire)).
+ bool isPipDownhill(PipId pip, WireId wire)
+ {
+ checkCache(wire);
+ return pips_downhill.at(pip) == wire;
+ }
+
+ void cache_info() const
+ {
+ log_info("Cache hits: %zu\n", cache_hits);
+ log_info("Cache misses: %zu\n", cache_misses);
+ log_info("Cache evictions: %zu\n", cache_evictions);
+ }
+};
+
+void archcheck_conn(const Context *ctx)
+{
+ log_info("Checking connectivity data.\n");
+
+ log_info("Checking all wires...\n");
+
+#ifndef USING_LRU_CACHE
+ dict<PipId, WireId> pips_downhill;
+ dict<PipId, WireId> pips_uphill;
+#endif
+
+ for (WireId wire : ctx->getWires()) {
+ for (BelPin belpin : ctx->getWireBelPins(wire)) {
+ WireId wire2 = ctx->getBelPinWire(belpin.bel, belpin.pin);
+ log_assert(wire == wire2);
+ }
+
+ for (PipId pip : ctx->getPipsDownhill(wire)) {
+ WireId wire2 = ctx->getPipSrcWire(pip);
+ log_assert(wire == wire2);
+#ifndef USING_LRU_CACHE
+ auto result = pips_downhill.emplace(pip, wire);
+ log_assert(result.second);
+#endif
+ }
+
+ for (PipId pip : ctx->getPipsUphill(wire)) {
+ WireId wire2 = ctx->getPipDstWire(pip);
+ log_assert(wire == wire2);
+#ifndef USING_LRU_CACHE
+ auto result = pips_uphill.emplace(pip, wire);
+ log_assert(result.second);
+#endif
+ }
+ }
+
+ log_info("Checking all BELs...\n");
+ for (BelId bel : ctx->getBels()) {
+ for (IdString pin : ctx->getBelPins(bel)) {
+ WireId wire = ctx->getBelPinWire(bel, pin);
+
+ if (wire == WireId()) {
+ continue;
+ }
+
+ bool found_belpin = false;
+ for (BelPin belpin : ctx->getWireBelPins(wire)) {
+ if (belpin.bel == bel && belpin.pin == pin) {
+ found_belpin = true;
+ break;
+ }
+ }
+
+ log_assert(found_belpin);
+ }
+ }
+#ifdef USING_LRU_CACHE
+ // This cache is used to meet two goals:
+ // - Avoid linear scan by invoking getPipsDownhill/getPipsUphill directly.
+ // - Avoid having pip -> wire maps for the entire part.
+ //
+    // The overhead of maintaining the cache is small relative to the memory
+    // saved by avoiding the full pip -> wire map, and it still provides fast
+    // pip -> wire lookup, assuming that pips are returned from getPips with
+    // some chip locality.
+ LruWireCacheMap pip_cache(ctx, /*cache_size=*/64 * 1024);
+#endif
+ log_info("Checking all PIPs...\n");
+ for (PipId pip : ctx->getPips()) {
+ WireId src_wire = ctx->getPipSrcWire(pip);
+ if (src_wire != WireId()) {
+#ifdef USING_LRU_CACHE
+ log_assert(pip_cache.isPipDownhill(pip, src_wire));
+#else
+ log_assert(pips_downhill.at(pip) == src_wire);
+#endif
+ }
+
+ WireId dst_wire = ctx->getPipDstWire(pip);
+ if (dst_wire != WireId()) {
+#ifdef USING_LRU_CACHE
+ log_assert(pip_cache.isPipUphill(pip, dst_wire));
+#else
+ log_assert(pips_uphill.at(pip) == dst_wire);
+#endif
+ }
+ }
+}
+
+void archcheck_buckets(const Context *ctx)
+{
+ log_info("Checking bucket data.\n");
+
+    // BEL buckets should be subsets of BELs that form an exact cover.
+    // In particular, that means cell types in a bucket should only be
+    // placeable in that bucket.
+ for (BelBucketId bucket : ctx->getBelBuckets()) {
+
+ // Find out which cell types are in this bucket.
+ pool<IdString> cell_types_in_bucket;
+ for (IdString cell_type : ctx->getCellTypes()) {
+ if (ctx->getBelBucketForCellType(cell_type) == bucket) {
+ cell_types_in_bucket.insert(cell_type);
+ }
+ }
+
+ // Make sure that all cell types in this bucket have at least one
+ // BelId they can be placed at.
+ pool<IdString> cell_types_unused;
+
+ pool<BelId> bels_in_bucket;
+ for (BelId bel : ctx->getBelsInBucket(bucket)) {
+ BelBucketId bucket2 = ctx->getBelBucketForBel(bel);
+ log_assert(bucket == bucket2);
+
+ bels_in_bucket.insert(bel);
+
+ // Check to see if a cell type not in this bucket can be
+ // placed at a BEL in this bucket.
+ for (IdString cell_type : ctx->getCellTypes()) {
+ if (ctx->getBelBucketForCellType(cell_type) == bucket) {
+ if (ctx->isValidBelForCellType(cell_type, bel)) {
+ cell_types_unused.erase(cell_type);
+ }
+ } else {
+ log_assert(!ctx->isValidBelForCellType(cell_type, bel));
+ }
+ }
+ }
+
+ // Verify that any BEL not in this bucket reports a different
+ // bucket.
+ for (BelId bel : ctx->getBels()) {
+ if (ctx->getBelBucketForBel(bel) != bucket) {
+ log_assert(bels_in_bucket.count(bel) == 0);
+ }
+ }
+
+ log_assert(cell_types_unused.empty());
+ }
+}
+
+} // namespace
+
+NEXTPNR_NAMESPACE_BEGIN
+
+void Context::archcheck() const
+{
+ log_info("Running architecture database integrity check.\n");
+ log_break();
+
+ archcheck_names(this);
+ archcheck_locs(this);
+ archcheck_conn(this);
+ archcheck_buckets(this);
+}
+
+NEXTPNR_NAMESPACE_END
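(The check above is exposed as Context::archcheck() and can be run on any loaded design; a flow would typically invoke it behind a user option, as in this sketch where the flag name is hypothetical.)

    // e.g. somewhere in the command-line flow
    if (do_archcheck)       // hypothetical user flag
        ctx->archcheck();   // runs the name/location/connectivity/bucket checks above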
diff --git a/common/kernel/base_arch.h b/common/kernel/base_arch.h
new file mode 100644
index 00000000..3055619d
--- /dev/null
+++ b/common/kernel/base_arch.h
@@ -0,0 +1,486 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef BASE_ARCH_H
+#define BASE_ARCH_H
+
+#include <array>
+#include <vector>
+
+#include "arch_api.h"
+#include "base_clusterinfo.h"
+#include "idstring.h"
+#include "nextpnr_types.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+namespace {
+// For several functions, such as bel/wire/pip attributes, the trivial implementation is to return an empty vector.
+// But an arch might want to do something fancy with a custom range type that doesn't provide a default constructor,
+// so some cursed C++ is needed to return an empty object if possible, or error out at runtime if not.
+template <typename Tc> typename std::enable_if<std::is_constructible<Tc>::value, Tc>::type empty_if_possible()
+{
+ return Tc();
+}
+template <typename Tc> typename std::enable_if<!std::is_constructible<Tc>::value, Tc>::type empty_if_possible()
+{
+ NPNR_ASSERT_FALSE("attempting to use default implementation of range-returning function with range type lacking "
+ "default constructor!");
+}
+
+// Provide a default implementation of bel bucket name if typedef'd to IdString
+template <typename Tbbid>
+typename std::enable_if<std::is_same<Tbbid, IdString>::value, IdString>::type bbid_to_name(Tbbid id)
+{
+ return id;
+}
+template <typename Tbbid>
+typename std::enable_if<!std::is_same<Tbbid, IdString>::value, IdString>::type bbid_to_name(Tbbid id)
+{
+ NPNR_ASSERT_FALSE("getBelBucketName must be implemented when BelBucketId is a type other than IdString!");
+}
+template <typename Tbbid>
+typename std::enable_if<std::is_same<Tbbid, IdString>::value, BelBucketId>::type bbid_from_name(IdString name)
+{
+ return name;
+}
+template <typename Tbbid>
+typename std::enable_if<!std::is_same<Tbbid, IdString>::value, BelBucketId>::type bbid_from_name(IdString name)
+{
+ NPNR_ASSERT_FALSE("getBelBucketByName must be implemented when BelBucketId is a type other than IdString!");
+}
+
+// For the cell type and bel type ranges, we want to return our stored vectors only if the type matches
+template <typename Tret, typename Tc>
+typename std::enable_if<std::is_same<Tret, Tc>::value, Tret>::type return_if_match(Tret r)
+{
+ return r;
+}
+
+template <typename Tret, typename Tc>
+typename std::enable_if<!std::is_same<Tret, Tc>::value, Tc>::type return_if_match(Tret r)
+{
+ NPNR_ASSERT_FALSE("default implementations of cell type and bel bucket range functions only available when the "
+ "respective range types are 'const std::vector&'");
+}
+
+// Default implementations of the clustering functions
+template <typename Tid>
+typename std::enable_if<std::is_same<Tid, IdString>::value, CellInfo *>::type get_cluster_root(const BaseCtx *ctx,
+ Tid cluster)
+{
+ return ctx->cells.at(cluster).get();
+}
+
+template <typename Tid>
+typename std::enable_if<!std::is_same<Tid, IdString>::value, CellInfo *>::type get_cluster_root(const BaseCtx *ctx,
+ Tid cluster)
+{
+ NPNR_ASSERT_FALSE("default implementation of getClusterRootCell requires ClusterId to be IdString");
+}
+
+// Executes the lambda with the base cluster data, only if the derivation works
+template <typename Tret, typename Tcell, typename Tfunc>
+typename std::enable_if<std::is_base_of<BaseClusterInfo, Tcell>::value, Tret>::type
+if_using_basecluster(const Tcell *cell, Tfunc func)
+{
+ return func(static_cast<const BaseClusterInfo *>(cell));
+}
+template <typename Tret, typename Tcell, typename Tfunc>
+typename std::enable_if<!std::is_base_of<BaseClusterInfo, Tcell>::value, Tret>::type
+if_using_basecluster(const Tcell *cell, Tfunc func)
+{
+ NPNR_ASSERT_FALSE(
+ "default implementation of cluster functions requires ArchCellInfo to derive from BaseClusterInfo");
+}
+
+} // namespace
+
+// This contains the relevant range types for the default implementations of Arch functions
+struct BaseArchRanges
+{
+ // Bels
+ using CellBelPinRangeT = std::array<IdString, 1>;
+ // Attributes
+ using BelAttrsRangeT = std::vector<std::pair<IdString, std::string>>;
+ using WireAttrsRangeT = std::vector<std::pair<IdString, std::string>>;
+ using PipAttrsRangeT = std::vector<std::pair<IdString, std::string>>;
+ // Groups
+ using AllGroupsRangeT = std::vector<GroupId>;
+ using GroupBelsRangeT = std::vector<BelId>;
+ using GroupWiresRangeT = std::vector<WireId>;
+ using GroupPipsRangeT = std::vector<PipId>;
+ using GroupGroupsRangeT = std::vector<GroupId>;
+ // Decals
+ using DecalGfxRangeT = std::vector<GraphicElement>;
+ // Placement validity
+ using CellTypeRangeT = const std::vector<IdString> &;
+ using BelBucketRangeT = const std::vector<BelBucketId> &;
+ using BucketBelRangeT = const std::vector<BelId> &;
+};
+
+template <typename R> struct BaseArch : ArchAPI<R>
+{
+ // --------------------------------------------------------------
+ // Default, trivial, implementations of Arch API functions for arches that don't need complex behaviours
+
+ // Basic config
+ virtual IdString archId() const override { return this->id(NPNR_STRINGIFY(ARCHNAME)); }
+ virtual IdString archArgsToId(typename R::ArchArgsT args) const override { return IdString(); }
+ virtual int getTilePipDimZ(int x, int y) const override { return 1; }
+ virtual char getNameDelimiter() const override { return ' '; }
+
+ // Bel methods
+ virtual uint32_t getBelChecksum(BelId bel) const override { return bel.hash(); }
+ virtual void bindBel(BelId bel, CellInfo *cell, PlaceStrength strength) override
+ {
+ NPNR_ASSERT(bel != BelId());
+ auto &entry = base_bel2cell[bel];
+ NPNR_ASSERT(entry == nullptr);
+ cell->bel = bel;
+ cell->belStrength = strength;
+ entry = cell;
+ this->refreshUiBel(bel);
+ }
+ virtual void unbindBel(BelId bel) override
+ {
+ NPNR_ASSERT(bel != BelId());
+ auto &entry = base_bel2cell[bel];
+ NPNR_ASSERT(entry != nullptr);
+ entry->bel = BelId();
+ entry->belStrength = STRENGTH_NONE;
+ entry = nullptr;
+ this->refreshUiBel(bel);
+ }
+
+ virtual bool getBelHidden(BelId bel) const override { return false; }
+
+ virtual bool getBelGlobalBuf(BelId bel) const override { return false; }
+ virtual bool checkBelAvail(BelId bel) const override { return getBoundBelCell(bel) == nullptr; };
+ virtual CellInfo *getBoundBelCell(BelId bel) const override
+ {
+ auto fnd = base_bel2cell.find(bel);
+ return fnd == base_bel2cell.end() ? nullptr : fnd->second;
+ }
+ virtual CellInfo *getConflictingBelCell(BelId bel) const override { return getBoundBelCell(bel); }
+ virtual typename R::BelAttrsRangeT getBelAttrs(BelId bel) const override
+ {
+ return empty_if_possible<typename R::BelAttrsRangeT>();
+ }
+
+ virtual typename R::CellBelPinRangeT getBelPinsForCellPin(const CellInfo *cell_info, IdString pin) const override
+ {
+ return return_if_match<std::array<IdString, 1>, typename R::CellBelPinRangeT>({pin});
+ }
+
+ // Wire methods
+ virtual IdString getWireType(WireId wire) const override { return IdString(); }
+ virtual typename R::WireAttrsRangeT getWireAttrs(WireId) const override
+ {
+ return empty_if_possible<typename R::WireAttrsRangeT>();
+ }
+ virtual uint32_t getWireChecksum(WireId wire) const override { return wire.hash(); }
+
+ virtual void bindWire(WireId wire, NetInfo *net, PlaceStrength strength) override
+ {
+ NPNR_ASSERT(wire != WireId());
+ auto &w2n_entry = base_wire2net[wire];
+ NPNR_ASSERT(w2n_entry == nullptr);
+ net->wires[wire].pip = PipId();
+ net->wires[wire].strength = strength;
+ w2n_entry = net;
+ this->refreshUiWire(wire);
+ }
+ virtual void unbindWire(WireId wire) override
+ {
+ NPNR_ASSERT(wire != WireId());
+ auto &w2n_entry = base_wire2net[wire];
+ NPNR_ASSERT(w2n_entry != nullptr);
+
+ auto &net_wires = w2n_entry->wires;
+ auto it = net_wires.find(wire);
+ NPNR_ASSERT(it != net_wires.end());
+
+ auto pip = it->second.pip;
+ if (pip != PipId()) {
+ base_pip2net[pip] = nullptr;
+ }
+
+ net_wires.erase(it);
+ base_wire2net[wire] = nullptr;
+
+ w2n_entry = nullptr;
+ this->refreshUiWire(wire);
+ }
+ virtual bool checkWireAvail(WireId wire) const override { return getBoundWireNet(wire) == nullptr; }
+ virtual NetInfo *getBoundWireNet(WireId wire) const override
+ {
+ auto fnd = base_wire2net.find(wire);
+ return fnd == base_wire2net.end() ? nullptr : fnd->second;
+ }
+ virtual WireId getConflictingWireWire(WireId wire) const override { return wire; };
+ virtual NetInfo *getConflictingWireNet(WireId wire) const override { return getBoundWireNet(wire); }
+
+ // Pip methods
+ virtual IdString getPipType(PipId pip) const override { return IdString(); }
+ virtual typename R::PipAttrsRangeT getPipAttrs(PipId) const override
+ {
+ return empty_if_possible<typename R::PipAttrsRangeT>();
+ }
+ virtual uint32_t getPipChecksum(PipId pip) const override { return pip.hash(); }
+ virtual void bindPip(PipId pip, NetInfo *net, PlaceStrength strength) override
+ {
+ NPNR_ASSERT(pip != PipId());
+ auto &p2n_entry = base_pip2net[pip];
+ NPNR_ASSERT(p2n_entry == nullptr);
+ p2n_entry = net;
+
+ WireId dst = this->getPipDstWire(pip);
+ auto &w2n_entry = base_wire2net[dst];
+ NPNR_ASSERT(w2n_entry == nullptr);
+ w2n_entry = net;
+ net->wires[dst].pip = pip;
+ net->wires[dst].strength = strength;
+ }
+ virtual void unbindPip(PipId pip) override
+ {
+ NPNR_ASSERT(pip != PipId());
+ auto &p2n_entry = base_pip2net[pip];
+ NPNR_ASSERT(p2n_entry != nullptr);
+ WireId dst = this->getPipDstWire(pip);
+
+ auto &w2n_entry = base_wire2net[dst];
+ NPNR_ASSERT(w2n_entry != nullptr);
+ w2n_entry = nullptr;
+
+ p2n_entry->wires.erase(dst);
+ p2n_entry = nullptr;
+ }
+ virtual bool checkPipAvail(PipId pip) const override { return getBoundPipNet(pip) == nullptr; }
+ virtual bool checkPipAvailForNet(PipId pip, NetInfo *net) const override
+ {
+ NetInfo *bound_net = getBoundPipNet(pip);
+ return bound_net == nullptr || bound_net == net;
+ }
+ virtual NetInfo *getBoundPipNet(PipId pip) const override
+ {
+ auto fnd = base_pip2net.find(pip);
+ return fnd == base_pip2net.end() ? nullptr : fnd->second;
+ }
+ virtual WireId getConflictingPipWire(PipId pip) const override { return WireId(); }
+ virtual NetInfo *getConflictingPipNet(PipId pip) const override { return getBoundPipNet(pip); }
+
+ // Group methods
+ virtual GroupId getGroupByName(IdStringList name) const override { return GroupId(); };
+ virtual IdStringList getGroupName(GroupId group) const override { return IdStringList(); };
+ virtual typename R::AllGroupsRangeT getGroups() const override
+ {
+ return empty_if_possible<typename R::AllGroupsRangeT>();
+ }
+    // The default implementations of these assume no groups exist, so they are never called
+ virtual typename R::GroupBelsRangeT getGroupBels(GroupId group) const override
+ {
+ NPNR_ASSERT_FALSE("unreachable");
+ };
+ virtual typename R::GroupWiresRangeT getGroupWires(GroupId group) const override
+ {
+ NPNR_ASSERT_FALSE("unreachable");
+ };
+ virtual typename R::GroupPipsRangeT getGroupPips(GroupId group) const override
+ {
+ NPNR_ASSERT_FALSE("unreachable");
+ };
+ virtual typename R::GroupGroupsRangeT getGroupGroups(GroupId group) const override
+ {
+ NPNR_ASSERT_FALSE("unreachable");
+ };
+
+ // Delay methods
+ virtual bool getBudgetOverride(const NetInfo *net_info, const PortRef &sink, delay_t &budget) const override
+ {
+ return false;
+ }
+
+ // Decal methods
+ virtual typename R::DecalGfxRangeT getDecalGraphics(DecalId decal) const override
+ {
+ return empty_if_possible<typename R::DecalGfxRangeT>();
+ };
+ virtual DecalXY getBelDecal(BelId bel) const override { return DecalXY(); }
+ virtual DecalXY getWireDecal(WireId wire) const override { return DecalXY(); }
+ virtual DecalXY getPipDecal(PipId pip) const override { return DecalXY(); }
+ virtual DecalXY getGroupDecal(GroupId group) const override { return DecalXY(); }
+
+ // Cell timing methods
+ virtual bool getCellDelay(const CellInfo *cell, IdString fromPort, IdString toPort, DelayQuad &delay) const override
+ {
+ return false;
+ }
+ virtual TimingPortClass getPortTimingClass(const CellInfo *cell, IdString port, int &clockInfoCount) const override
+ {
+ return TMG_IGNORE;
+ }
+ virtual TimingClockingInfo getPortClockingInfo(const CellInfo *cell, IdString port, int index) const override
+ {
+ NPNR_ASSERT_FALSE("unreachable");
+ }
+
+ // Placement validity checks
+ virtual bool isValidBelForCellType(IdString cell_type, BelId bel) const override
+ {
+ return cell_type == this->getBelType(bel);
+ }
+ virtual IdString getBelBucketName(BelBucketId bucket) const override { return bbid_to_name<BelBucketId>(bucket); }
+ virtual BelBucketId getBelBucketByName(IdString name) const override { return bbid_from_name<BelBucketId>(name); }
+ virtual BelBucketId getBelBucketForBel(BelId bel) const override
+ {
+ return getBelBucketForCellType(this->getBelType(bel));
+ };
+ virtual BelBucketId getBelBucketForCellType(IdString cell_type) const override
+ {
+ return getBelBucketByName(cell_type);
+ };
+ virtual bool isBelLocationValid(BelId bel) const override { return true; }
+ virtual typename R::CellTypeRangeT getCellTypes() const override
+ {
+ NPNR_ASSERT(cell_types_initialised);
+ return return_if_match<const std::vector<IdString> &, typename R::CellTypeRangeT>(cell_types);
+ }
+ virtual typename R::BelBucketRangeT getBelBuckets() const override
+ {
+ NPNR_ASSERT(bel_buckets_initialised);
+ return return_if_match<const std::vector<BelBucketId> &, typename R::BelBucketRangeT>(bel_buckets);
+ }
+ virtual typename R::BucketBelRangeT getBelsInBucket(BelBucketId bucket) const override
+ {
+ NPNR_ASSERT(bel_buckets_initialised);
+ return return_if_match<const std::vector<BelId> &, typename R::BucketBelRangeT>(bucket_bels.at(bucket));
+ }
+
+ // Cluster methods
+ virtual CellInfo *getClusterRootCell(ClusterId cluster) const override { return get_cluster_root(this, cluster); }
+
+ virtual ArcBounds getClusterBounds(ClusterId cluster) const override
+ {
+ return if_using_basecluster<ArcBounds>(get_cluster_root(this, cluster), [](const BaseClusterInfo *cluster) {
+ ArcBounds bounds(0, 0, 0, 0);
+ for (auto child : cluster->constr_children) {
+ if_using_basecluster<void>(child, [&](const BaseClusterInfo *child) {
+ bounds.x0 = std::min(bounds.x0, child->constr_x);
+ bounds.y0 = std::min(bounds.y0, child->constr_y);
+ bounds.x1 = std::max(bounds.x1, child->constr_x);
+ bounds.y1 = std::max(bounds.y1, child->constr_y);
+ });
+ }
+ return bounds;
+ });
+ }
+
+ virtual Loc getClusterOffset(const CellInfo *cell) const override
+ {
+ return if_using_basecluster<Loc>(cell,
+ [](const BaseClusterInfo *c) { return Loc(c->constr_x, c->constr_y, 0); });
+ }
+
+ virtual bool isClusterStrict(const CellInfo *cell) const override { return true; }
+
+ virtual bool getClusterPlacement(ClusterId cluster, BelId root_bel,
+ std::vector<std::pair<CellInfo *, BelId>> &placement) const override
+ {
+ CellInfo *root_cell = get_cluster_root(this, cluster);
+ return if_using_basecluster<bool>(root_cell, [&](const BaseClusterInfo *cluster) -> bool {
+ placement.clear();
+ NPNR_ASSERT(root_bel != BelId());
+ Loc root_loc = this->getBelLocation(root_bel);
+
+ if (cluster->constr_abs_z) {
+ // Coerce root to absolute z constraint
+ root_loc.z = cluster->constr_z;
+ root_bel = this->getBelByLocation(root_loc);
+ if (root_bel == BelId() || !this->isValidBelForCellType(root_cell->type, root_bel))
+ return false;
+ }
+ placement.emplace_back(root_cell, root_bel);
+
+ for (auto child : cluster->constr_children) {
+ Loc child_loc = if_using_basecluster<Loc>(child, [&](const BaseClusterInfo *child) {
+ Loc result;
+ result.x = root_loc.x + child->constr_x;
+ result.y = root_loc.y + child->constr_y;
+ result.z = child->constr_abs_z ? child->constr_z : (root_loc.z + child->constr_z);
+ return result;
+ });
+ BelId child_bel = this->getBelByLocation(child_loc);
+ if (child_bel == BelId() || !this->isValidBelForCellType(child->type, child_bel))
+ return false;
+ placement.emplace_back(child, child_bel);
+ }
+ return true;
+ });
+ }
+
+ // Flow methods
+ virtual void assignArchInfo() override{};
+
+ // --------------------------------------------------------------
+ // These structures are used to provide default implementations of bel/wire/pip binding. Arches might want to
+ // replace them with their own, for example to use faster access structures than dict. Arches might also
+ // want to add extra checks around these functions
+ dict<BelId, CellInfo *> base_bel2cell;
+ dict<WireId, NetInfo *> base_wire2net;
+ dict<PipId, NetInfo *> base_pip2net;
+
+ // For the default cell/bel bucket implementations
+ std::vector<IdString> cell_types;
+ std::vector<BelBucketId> bel_buckets;
+ dict<BelBucketId, std::vector<BelId>> bucket_bels;
+
+ // Arches that want to use the default cell types and bel buckets *must* call these functions in their constructor
+ bool cell_types_initialised = false;
+ bool bel_buckets_initialised = false;
+ void init_cell_types()
+ {
+ pool<IdString> bel_types;
+ for (auto bel : this->getBels())
+ bel_types.insert(this->getBelType(bel));
+ std::copy(bel_types.begin(), bel_types.end(), std::back_inserter(cell_types));
+ std::sort(cell_types.begin(), cell_types.end());
+ cell_types_initialised = true;
+ }
+ void init_bel_buckets()
+ {
+ for (auto cell_type : this->getCellTypes()) {
+ auto bucket = this->getBelBucketForCellType(cell_type);
+ bucket_bels[bucket]; // create empty bucket
+ }
+ for (auto bel : this->getBels()) {
+ auto bucket = this->getBelBucketForBel(bel);
+ bucket_bels[bucket].push_back(bel);
+ }
+ for (auto &b : bucket_bels)
+ bel_buckets.push_back(b.first);
+ std::sort(bel_buckets.begin(), bel_buckets.end());
+ bel_buckets_initialised = true;
+ }
+};
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* BASE_ARCH_H */
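(To make the intended usage concrete, here is a hedged sketch of how an arch would plug into BaseArch; the names MyArch, MyArchArgs, MyBelRange and load_chipdb are invented for illustration and are not part of this commit.)

    struct MyArchRanges : BaseArchRanges
    {
        using ArchArgsT = MyArchArgs;     // arch-specific argument struct
        using AllBelsRangeT = MyBelRange; // custom bel iterator range
        // other *RangeT typedefs required by ArchAPI go here; anything covered
        // by BaseArchRanges (attrs, groups, decals, buckets) can be inherited
    };

    struct MyArch : BaseArch<MyArchRanges>
    {
        MyArch(MyArchArgs args)
        {
            load_chipdb(args);   // make getBels()/getBelType() usable first
            init_cell_types();   // must run before init_bel_buckets()
            init_bel_buckets();  // uses getCellTypes()/getBelBucketForBel()
        }
        // ... overrides for the remaining pure-virtual ArchAPI methods
    };

The ordering matters because init_bel_buckets() calls getCellTypes(), which asserts that init_cell_types() has already run.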
diff --git a/common/kernel/base_clusterinfo.h b/common/kernel/base_clusterinfo.h
new file mode 100644
index 00000000..65e8e6d4
--- /dev/null
+++ b/common/kernel/base_clusterinfo.h
@@ -0,0 +1,45 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2021 gatecat <gatecat@ds0.me>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef BASE_CLUSTERINFO_H
+#define BASE_CLUSTERINFO_H
+
+#include "idstring.h"
+#include "nextpnr_namespaces.h"
+
+#include <vector>
+
+NEXTPNR_NAMESPACE_BEGIN
+
+struct CellInfo;
+
+// The 'legacy' cluster data, used for existing arches and to provide a basic implementation for arches without complex
+// clustering requirements
+struct BaseClusterInfo
+{
+ std::vector<CellInfo *> constr_children;
+ int constr_x = 0; // this.x - parent.x
+ int constr_y = 0; // this.y - parent.y
+ int constr_z = 0; // this.z - parent.z
+    bool constr_abs_z = false; // if true, constr_z is absolute (parent.z treated as 0)
+};
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* BASE_CLUSTERINFO_H */
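(As a concrete reading of those fields, with example values only: a cell constrained to sit in the same column as its cluster root, one tile away in y, at a fixed bel index within its tile, would carry the following.)

    BaseClusterInfo c;
    c.constr_x = 0;         // same x as the root (this.x - parent.x)
    c.constr_y = 1;         // offset of one tile in y from the root
    c.constr_z = 2;         // bel index within the tile
    c.constr_abs_z = true;  // constr_z is absolute rather than relative to the root's z

BaseArch::getClusterPlacement() above turns these offsets into candidate bel locations relative to the root bel.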
diff --git a/common/kernel/basectx.cc b/common/kernel/basectx.cc
new file mode 100644
index 00000000..83a2deea
--- /dev/null
+++ b/common/kernel/basectx.cc
@@ -0,0 +1,279 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "basectx.h"
+
+#include <boost/algorithm/string.hpp>
+
+#include "context.h"
+#include "log.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+const char *BaseCtx::nameOfBel(BelId bel) const
+{
+ const Context *ctx = getCtx();
+ std::string &s = ctx->log_strs.next();
+ ctx->getBelName(bel).build_str(ctx, s);
+ return s.c_str();
+}
+
+const char *BaseCtx::nameOfWire(WireId wire) const
+{
+ const Context *ctx = getCtx();
+ std::string &s = ctx->log_strs.next();
+ ctx->getWireName(wire).build_str(ctx, s);
+ return s.c_str();
+}
+
+const char *BaseCtx::nameOfPip(PipId pip) const
+{
+ const Context *ctx = getCtx();
+ std::string &s = ctx->log_strs.next();
+ ctx->getPipName(pip).build_str(ctx, s);
+ return s.c_str();
+}
+
+const char *BaseCtx::nameOfGroup(GroupId group) const
+{
+ const Context *ctx = getCtx();
+ std::string &s = ctx->log_strs.next();
+ ctx->getGroupName(group).build_str(ctx, s);
+ return s.c_str();
+}
+
+BelId BaseCtx::getBelByNameStr(const std::string &str)
+{
+ Context *ctx = getCtx();
+ return ctx->getBelByName(IdStringList::parse(ctx, str));
+}
+
+WireId BaseCtx::getWireByNameStr(const std::string &str)
+{
+ Context *ctx = getCtx();
+ return ctx->getWireByName(IdStringList::parse(ctx, str));
+}
+
+PipId BaseCtx::getPipByNameStr(const std::string &str)
+{
+ Context *ctx = getCtx();
+ return ctx->getPipByName(IdStringList::parse(ctx, str));
+}
+
+GroupId BaseCtx::getGroupByNameStr(const std::string &str)
+{
+ Context *ctx = getCtx();
+ return ctx->getGroupByName(IdStringList::parse(ctx, str));
+}
+
+void BaseCtx::addClock(IdString net, float freq)
+{
+ std::unique_ptr<ClockConstraint> cc(new ClockConstraint());
+ cc->period = DelayPair(getCtx()->getDelayFromNS(1000 / freq));
+ cc->high = DelayPair(getCtx()->getDelayFromNS(500 / freq));
+ cc->low = DelayPair(getCtx()->getDelayFromNS(500 / freq));
+ if (!net_aliases.count(net)) {
+ log_warning("net '%s' does not exist in design, ignoring clock constraint\n", net.c_str(this));
+ } else {
+ getNetByAlias(net)->clkconstr = std::move(cc);
+ log_info("constraining clock net '%s' to %.02f MHz\n", net.c_str(this), freq);
+ }
+}
+
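+// Illustrative example: createRectangularRegion(id("pll_area"), 0, 0, 5, 5) collects every bel in
+// tiles (0..5, 0..5) inclusive into a bel-constrained region named "pll_area".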
+void BaseCtx::createRectangularRegion(IdString name, int x0, int y0, int x1, int y1)
+{
+ std::unique_ptr<Region> new_region(new Region());
+ new_region->name = name;
+ new_region->constr_bels = true;
+ new_region->constr_pips = false;
+ new_region->constr_wires = false;
+ for (int x = x0; x <= x1; x++) {
+ for (int y = y0; y <= y1; y++) {
+ for (auto bel : getCtx()->getBelsByTile(x, y))
+ new_region->bels.insert(bel);
+ }
+ }
+ region[name] = std::move(new_region);
+}
+void BaseCtx::addBelToRegion(IdString name, BelId bel) { region[name]->bels.insert(bel); }
+void BaseCtx::constrainCellToRegion(IdString cell, IdString region_name)
+{
+ // Support hierarchical cells as well as leaf ones
+ bool matched = false;
+ if (hierarchy.count(cell)) {
+ auto &hc = hierarchy.at(cell);
+ for (auto &lc : hc.leaf_cells)
+ constrainCellToRegion(lc.second, region_name);
+ for (auto &hsc : hc.hier_cells)
+ constrainCellToRegion(hsc.second, region_name);
+ matched = true;
+ }
+ if (cells.count(cell)) {
+ cells.at(cell)->region = region[region_name].get();
+ matched = true;
+ }
+ if (!matched)
+ log_warning("No cell matched '%s' when constraining to region '%s'\n", nameOf(cell), nameOf(region_name));
+}
+DecalXY BaseCtx::constructDecalXY(DecalId decal, float x, float y)
+{
+ DecalXY dxy;
+ dxy.decal = decal;
+ dxy.x = x;
+ dxy.y = y;
+ return dxy;
+}
+
+void BaseCtx::archInfoToAttributes()
+{
+ for (auto &cell : cells) {
+ auto ci = cell.second.get();
+ if (ci->bel != BelId()) {
+ if (ci->attrs.find(id("BEL")) != ci->attrs.end()) {
+ ci->attrs.erase(ci->attrs.find(id("BEL")));
+ }
+ ci->attrs[id("NEXTPNR_BEL")] = getCtx()->getBelName(ci->bel).str(getCtx());
+ ci->attrs[id("BEL_STRENGTH")] = (int)ci->belStrength;
+ }
+ }
+ for (auto &net : getCtx()->nets) {
+ auto ni = net.second.get();
+ std::string routing;
+ bool first = true;
+ for (auto &item : ni->wires) {
+ if (!first)
+ routing += ";";
+ routing += getCtx()->getWireName(item.first).str(getCtx());
+ routing += ";";
+ if (item.second.pip != PipId())
+ routing += getCtx()->getPipName(item.second.pip).str(getCtx());
+ routing += ";" + std::to_string(item.second.strength);
+ first = false;
+ }
+ ni->attrs[id("ROUTING")] = routing;
+ }
+}
+
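+// The ROUTING attribute written by archInfoToAttributes() is a flat ';'-separated list of
+// (wire, pip, strength) triples, e.g. "WIRE_A;;1;WIRE_B;PIP_AB;1" (illustrative names); an empty
+// pip field means the wire was bound without a driving pip (typically the source wire).
+// attributesToArchInfo() below parses the same format back into wire/pip bindings.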
+void BaseCtx::attributesToArchInfo()
+{
+ for (auto &cell : cells) {
+ auto ci = cell.second.get();
+ auto val = ci->attrs.find(id("NEXTPNR_BEL"));
+ if (val != ci->attrs.end()) {
+ auto str = ci->attrs.find(id("BEL_STRENGTH"));
+ PlaceStrength strength = PlaceStrength::STRENGTH_USER;
+ if (str != ci->attrs.end())
+ strength = (PlaceStrength)str->second.as_int64();
+
+ BelId b = getCtx()->getBelByNameStr(val->second.as_string());
+ getCtx()->bindBel(b, ci, strength);
+ }
+ }
+ for (auto &net : getCtx()->nets) {
+ auto ni = net.second.get();
+ auto val = ni->attrs.find(id("ROUTING"));
+ if (val != ni->attrs.end()) {
+ std::vector<std::string> strs;
+ auto routing = val->second.as_string();
+ boost::split(strs, routing, boost::is_any_of(";"));
+ for (size_t i = 0; i < strs.size() / 3; i++) {
+ std::string wire = strs[i * 3];
+ std::string pip = strs[i * 3 + 1];
+ PlaceStrength strength = (PlaceStrength)std::stoi(strs[i * 3 + 2]);
+ if (pip.empty())
+ getCtx()->bindWire(getCtx()->getWireByName(IdStringList::parse(getCtx(), wire)), ni, strength);
+ else
+ getCtx()->bindPip(getCtx()->getPipByName(IdStringList::parse(getCtx(), pip)), ni, strength);
+ }
+ }
+ }
+ getCtx()->assignArchInfo();
+}
+
+NetInfo *BaseCtx::createNet(IdString name)
+{
+ NPNR_ASSERT(!nets.count(name));
+ NPNR_ASSERT(!net_aliases.count(name));
+ auto net = std::make_unique<NetInfo>(name);
+ net_aliases[name] = name;
+ NetInfo *ptr = net.get();
+ nets[name] = std::move(net);
+ refreshUi();
+ return ptr;
+}
+
+void BaseCtx::connectPort(IdString net, IdString cell, IdString port)
+{
+ NetInfo *net_info = getNetByAlias(net);
+ CellInfo *cell_info = cells.at(cell).get();
+ cell_info->connectPort(port, net_info);
+}
+
+void BaseCtx::disconnectPort(IdString cell, IdString port)
+{
+ CellInfo *cell_info = cells.at(cell).get();
+ cell_info->disconnectPort(port);
+}
+
+void BaseCtx::renameNet(IdString old_name, IdString new_name)
+{
+ NetInfo *net = nets.at(old_name).get();
+ NPNR_ASSERT(!nets.count(new_name));
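+    // Create an empty slot under the new name, then swap the unique_ptr into it; the NetInfo
+    // object itself is never moved, so existing pointers to it remain valid.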
+ nets[new_name];
+ std::swap(nets.at(net->name), nets.at(new_name));
+ nets.erase(net->name);
+ net->name = new_name;
+}
+
+void BaseCtx::ripupNet(IdString name)
+{
+ NetInfo *net_info = getNetByAlias(name);
+ std::vector<WireId> to_unbind;
+ for (auto &wire : net_info->wires)
+ to_unbind.push_back(wire.first);
+ for (auto &unbind : to_unbind)
+ getCtx()->unbindWire(unbind);
+}
+void BaseCtx::lockNetRouting(IdString name)
+{
+ NetInfo *net_info = getNetByAlias(name);
+ for (auto &wire : net_info->wires)
+ wire.second.strength = STRENGTH_USER;
+}
+
+CellInfo *BaseCtx::createCell(IdString name, IdString type)
+{
+ NPNR_ASSERT(!cells.count(name));
+ auto cell = std::make_unique<CellInfo>(getCtx(), name, type);
+ CellInfo *ptr = cell.get();
+ cells[name] = std::move(cell);
+ refreshUi();
+ return ptr;
+}
+
+void BaseCtx::copyBelPorts(IdString cell, BelId bel)
+{
+ CellInfo *cell_info = cells.at(cell).get();
+ for (auto pin : getCtx()->getBelPins(bel)) {
+ cell_info->ports[pin].name = pin;
+ cell_info->ports[pin].type = getCtx()->getBelPinType(bel, pin);
+ }
+}
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/basectx.h b/common/kernel/basectx.h
new file mode 100644
index 00000000..21d6d63a
--- /dev/null
+++ b/common/kernel/basectx.h
@@ -0,0 +1,243 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef BASECTX_H
+#define BASECTX_H
+
+#include <mutex>
+#include <unordered_map>
+#include <vector>
+#ifndef NPNR_DISABLE_THREADS
+#include <boost/thread.hpp>
+#endif
+
+#include "hashlib.h"
+#include "idstring.h"
+#include "nextpnr_namespaces.h"
+#include "nextpnr_types.h"
+#include "property.h"
+#include "str_ring_buffer.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+struct Context;
+
+struct BaseCtx
+{
+#ifndef NPNR_DISABLE_THREADS
+ // Lock to perform mutating actions on the Context.
+ std::mutex mutex;
+ boost::thread::id mutex_owner;
+
+ // Lock to be taken by UI when wanting to access context - the yield()
+    // method will lock/unlock it when it has released the main mutex to make
+ // sure the UI is not starved.
+ std::mutex ui_mutex;
+#endif
+
+ // ID String database.
+ mutable std::unordered_map<std::string, int> *idstring_str_to_idx;
+ mutable std::vector<const std::string *> *idstring_idx_to_str;
+
+ // Temporary string backing store for logging
+ mutable StrRingBuffer log_strs;
+
+ // Project settings and config switches
+ dict<IdString, Property> settings;
+
+ // Placed nets and cells.
+ dict<IdString, std::unique_ptr<NetInfo>> nets;
+ dict<IdString, std::unique_ptr<CellInfo>> cells;
+
+ // Hierarchical (non-leaf) cells by full path
+ dict<IdString, HierarchicalCell> hierarchy;
+ // This is the root of the above structure
+ IdString top_module;
+
+ // Aliases for nets, which may have more than one name due to assignments and hierarchy
+ dict<IdString, IdString> net_aliases;
+
+ // Top-level ports
+ dict<IdString, PortInfo> ports;
+ dict<IdString, CellInfo *> port_cells;
+
+ // Floorplanning regions
+ dict<IdString, std::unique_ptr<Region>> region;
+
+ // Context meta data
+ dict<IdString, Property> attrs;
+
+ // Fmax data post timing analysis
+ TimingResult timing_result;
+
+ Context *as_ctx = nullptr;
+
+ // Has the frontend loaded a design?
+ bool design_loaded;
+
+ BaseCtx()
+ {
+ idstring_str_to_idx = new std::unordered_map<std::string, int>;
+ idstring_idx_to_str = new std::vector<const std::string *>;
+ IdString::initialize_add(this, "", 0);
+ IdString::initialize_arch(this);
+
+ design_loaded = false;
+ }
+
+ virtual ~BaseCtx()
+ {
+ delete idstring_str_to_idx;
+ delete idstring_idx_to_str;
+ }
+
+ // Must be called before performing any mutating changes on the Ctx/Arch.
+ void lock(void)
+ {
+#ifndef NPNR_DISABLE_THREADS
+ mutex.lock();
+ mutex_owner = boost::this_thread::get_id();
+#endif
+ }
+
+ void unlock(void)
+ {
+#ifndef NPNR_DISABLE_THREADS
+ NPNR_ASSERT(boost::this_thread::get_id() == mutex_owner);
+ mutex.unlock();
+#endif
+ }
+
+ // Must be called by the UI before rendering data. This lock will be
+ // prioritized when processing code calls yield().
+ void lock_ui(void)
+ {
+#ifndef NPNR_DISABLE_THREADS
+ ui_mutex.lock();
+ mutex.lock();
+#endif
+ }
+
+ void unlock_ui(void)
+ {
+#ifndef NPNR_DISABLE_THREADS
+ mutex.unlock();
+ ui_mutex.unlock();
+#endif
+ }
+
+    // Yield to the UI by unlocking the main mutex, briefly locking and unlocking
+    // the UI mutex, and then relocking the main mutex. Call this when performing a
+    // long-running action while holding the lock, so the UI can show
+    // visualization updates.
+ // Must be called with the main lock taken.
+ void yield(void)
+ {
+#ifndef NPNR_DISABLE_THREADS
+ unlock();
+ ui_mutex.lock();
+ ui_mutex.unlock();
+ lock();
+#endif
+ }
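+    // Illustrative usage of yield() (not part of this header): a long-running pass would typically do
+    // lock(); for (...) { ...work...; if ((iter % 100) == 0) yield(); } unlock();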
+
+ IdString id(const std::string &s) const { return IdString(this, s); }
+
+ IdString id(const char *s) const { return IdString(this, s); }
+
+ Context *getCtx() { return as_ctx; }
+
+ const Context *getCtx() const { return as_ctx; }
+
+ const char *nameOf(IdString name) const { return name.c_str(this); }
+
+ template <typename T> const char *nameOf(const T *obj) const
+ {
+ if (obj == nullptr)
+ return "";
+ return obj->name.c_str(this);
+ }
+
+ const char *nameOfBel(BelId bel) const;
+ const char *nameOfWire(WireId wire) const;
+ const char *nameOfPip(PipId pip) const;
+ const char *nameOfGroup(GroupId group) const;
+
+ // Wrappers of arch functions that take a string and handle IdStringList parsing
+ BelId getBelByNameStr(const std::string &str);
+ WireId getWireByNameStr(const std::string &str);
+ PipId getPipByNameStr(const std::string &str);
+ GroupId getGroupByNameStr(const std::string &str);
+
+ // --------------------------------------------------------------
+
+ bool allUiReload = true;
+ bool frameUiReload = false;
+ pool<BelId> belUiReload;
+ pool<WireId> wireUiReload;
+ pool<PipId> pipUiReload;
+ pool<GroupId> groupUiReload;
+
+ void refreshUi() { allUiReload = true; }
+
+ void refreshUiFrame() { frameUiReload = true; }
+
+ void refreshUiBel(BelId bel) { belUiReload.insert(bel); }
+
+ void refreshUiWire(WireId wire) { wireUiReload.insert(wire); }
+
+ void refreshUiPip(PipId pip) { pipUiReload.insert(pip); }
+
+ void refreshUiGroup(GroupId group) { groupUiReload.insert(group); }
+
+ // --------------------------------------------------------------
+
+ NetInfo *getNetByAlias(IdString alias) const
+ {
+ return nets.count(alias) ? nets.at(alias).get() : nets.at(net_aliases.at(alias)).get();
+ }
+
+ // Intended to simplify Python API
+ void addClock(IdString net, float freq);
+ void createRectangularRegion(IdString name, int x0, int y0, int x1, int y1);
+ void addBelToRegion(IdString name, BelId bel);
+ void constrainCellToRegion(IdString cell, IdString region_name);
+
+ // Helper functions for Python bindings
+ NetInfo *createNet(IdString name);
+ void connectPort(IdString net, IdString cell, IdString port);
+ void disconnectPort(IdString cell, IdString port);
+ void ripupNet(IdString name);
+ void lockNetRouting(IdString name);
+ void renameNet(IdString old_name, IdString new_name);
+
+ CellInfo *createCell(IdString name, IdString type);
+ void copyBelPorts(IdString cell, BelId bel);
+
+ // Workaround for lack of wrappable constructors
+ DecalXY constructDecalXY(DecalId decal, float x, float y);
+
+ void archInfoToAttributes();
+ void attributesToArchInfo();
+};
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* BASECTX_H */
diff --git a/common/kernel/bits.cc b/common/kernel/bits.cc
new file mode 100644
index 00000000..b20c2e86
--- /dev/null
+++ b/common/kernel/bits.cc
@@ -0,0 +1,56 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (c) 2013 Mike Pedersen
+ * Copyright (C) 2021 Symbiflow Authors
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "bits.h"
+
+#include <limits>
+#include <stdexcept>
+
+#include "log.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+int Bits::generic_popcount(unsigned int v)
+{
+ unsigned int c; // c accumulates the total bits set in v
+ for (c = 0; v; c++) {
+ v &= v - 1; // clear the least significant bit set
+ }
+
+ return c;
+}
+
+int Bits::generic_ctz(unsigned int x)
+{
+ if (x == 0) {
+        log_error("Cannot call ctz with arg = 0\n");
+ }
+
+ for (size_t i = 0; i < std::numeric_limits<unsigned int>::digits; ++i) {
+ if ((x & (1 << i)) != 0) {
+ return i;
+ }
+ }
+
+ // Unreachable!
+    log_error("Unreachable!\n");
+}
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/bits.h b/common/kernel/bits.h
new file mode 100644
index 00000000..04b25b74
--- /dev/null
+++ b/common/kernel/bits.h
@@ -0,0 +1,76 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (c) 2013 Mike Pedersen
+ * Copyright (C) 2021 Symbiflow Authors
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+// This is a small library for implementing common bit vector utilities,
+// namely:
+//
+// - popcount : The number of bits set in an unsigned int
+// - ctz : The number of trailing zero bits in an unsigned int.
+// Must be called with a value that has at least 1 bit set.
+//
+// These methods will typically use intrinsics when available, and have a
+// generic fallback in the event that the intrinsic is not available.
+//
+// If clz (count leading zeros) is needed, it can be added when needed.
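+//
+// Example (illustrative): Bits::popcount(0b1011u) == 3 and Bits::ctz(0b1000u) == 3.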
+#ifndef BITS_H
+#define BITS_H
+
+#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
+#include <intrin.h>
+#pragma intrinsic(_BitScanForward, _BitScanReverse, __popcnt)
+#endif
+
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+struct Bits
+{
+ static int generic_popcount(unsigned int x);
+ static int generic_ctz(unsigned int x);
+
+ static int popcount(unsigned int x)
+ {
+#if defined(__GNUC__) || defined(__clang__)
+ return __builtin_popcount(x);
+#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
+ return __popcnt(x);
+#else
+ return generic_popcount(x);
+#endif
+ }
+
+ static int ctz(unsigned int x)
+ {
+#if defined(__GNUC__) || defined(__clang__)
+ return __builtin_ctz(x);
+#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
+ unsigned long result;
+ _BitScanForward(&result, x);
+ return result;
+#else
+ return generic_ctz(x);
+#endif
+ }
+};
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* BITS_H */
diff --git a/common/kernel/chain_utils.h b/common/kernel/chain_utils.h
new file mode 100644
index 00000000..ca8a1be3
--- /dev/null
+++ b/common/kernel/chain_utils.h
@@ -0,0 +1,69 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 gatecat <gatecat@ds0.me>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef CHAIN_UTILS_H
+#define CHAIN_UTILS_H
+
+#include "nextpnr.h"
+#include "util.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+struct CellChain
+{
+ std::vector<CellInfo *> cells;
+};
+
+// Generic chain finder
+template <typename F1, typename F2, typename F3>
+std::vector<CellChain> find_chains(const Context *ctx, F1 cell_type_predicate, F2 get_previous, F3 get_next,
+ size_t min_length = 2)
+{
+ std::set<IdString> chained;
+ std::vector<CellChain> chains;
+ for (auto &cell : ctx->cells) {
+ if (chained.find(cell.first) != chained.end())
+ continue;
+ CellInfo *ci = cell.second.get();
+ if (cell_type_predicate(ctx, ci)) {
+ CellInfo *start = ci;
+ CellInfo *prev_start = ci;
+ while (prev_start != nullptr) {
+ start = prev_start;
+ prev_start = get_previous(ctx, start);
+ }
+ CellChain chain;
+ CellInfo *end = start;
+ while (end != nullptr) {
+ if (chained.insert(end->name).second)
+ chain.cells.push_back(end);
+ end = get_next(ctx, end);
+ }
+ if (chain.cells.size() >= min_length) {
+ chains.push_back(chain);
+ for (auto c : chain.cells)
+ chained.insert(c->name);
+ }
+ }
+ }
+ return chains;
+}
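+
+// Illustrative usage (hypothetical cell type and helpers, not part of this header):
+//   auto chains = find_chains(
+//           ctx, [](const Context *ctx, const CellInfo *ci) { return ci->type == ctx->id("CARRY4"); },
+//           [](const Context *ctx, const CellInfo *ci) -> CellInfo * { return /* driver of CIN, or nullptr */ nullptr; },
+//           [](const Context *ctx, const CellInfo *ci) -> CellInfo * { return /* sink of COUT, or nullptr */ nullptr; },
+//           /*min_length=*/4);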
+
+NEXTPNR_NAMESPACE_END
+#endif
diff --git a/common/kernel/command.cc b/common/kernel/command.cc
new file mode 100644
index 00000000..00f900b3
--- /dev/null
+++ b/common/kernel/command.cc
@@ -0,0 +1,563 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Miodrag Milanovic <micko@yosyshq.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef NO_GUI
+#include <QApplication>
+#include "application.h"
+#include "mainwindow.h"
+#endif
+#ifndef NO_PYTHON
+#include "pybindings.h"
+#endif
+
+#include <boost/algorithm/string.hpp>
+#include <boost/algorithm/string/join.hpp>
+#include <boost/filesystem/convenience.hpp>
+#include <boost/program_options.hpp>
+#include <fstream>
+#include <iostream>
+#include <random>
+#include "command.h"
+#include "design_utils.h"
+#include "json_frontend.h"
+#include "jsonwrite.h"
+#include "log.h"
+#include "timing.h"
+#include "util.h"
+#include "version.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
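+// numpunct facet with digit grouping disabled, so numbers in log output never pick up
+// locale-specific thousands separators.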
+struct no_separator : std::numpunct<char>
+{
+ protected:
+ virtual string_type do_grouping() const { return "\000"; } // groups of 0 (disable)
+};
+
+CommandHandler::CommandHandler(int argc, char **argv) : argc(argc), argv(argv)
+{
+ try {
+ std::locale loc("");
+ std::locale::global(std::locale(loc, new no_separator()));
+ } catch (const std::runtime_error &e) {
+ // the locale is broken in this system, so leave it as it is
+ }
+ log_streams.clear();
+}
+
+bool CommandHandler::parseOptions()
+{
+ options.add(getGeneralOptions()).add(getArchOptions());
+ try {
+ po::parsed_options parsed =
+ po::command_line_parser(argc, argv)
+ .style(po::command_line_style::default_style ^ po::command_line_style::allow_guessing)
+ .options(options)
+ .positional(pos)
+ .run();
+ po::store(parsed, vm);
+ po::notify(vm);
+ return true;
+ } catch (std::exception &e) {
+ std::cout << e.what() << "\n";
+ return false;
+ }
+}
+
+bool CommandHandler::executeBeforeContext()
+{
+ if (vm.count("help") || argc == 1) {
+ std::cerr << boost::filesystem::basename(argv[0])
+ << " -- Next Generation Place and Route (Version " GIT_DESCRIBE_STR ")\n";
+ std::cerr << options << "\n";
+ return argc != 1;
+ }
+
+ if (vm.count("version")) {
+ std::cerr << boost::filesystem::basename(argv[0])
+ << " -- Next Generation Place and Route (Version " GIT_DESCRIBE_STR ")\n";
+ return true;
+ }
+ validate();
+
+ if (vm.count("quiet")) {
+ log_streams.push_back(std::make_pair(&std::cerr, LogLevel::WARNING_MSG));
+ } else {
+ log_streams.push_back(std::make_pair(&std::cerr, LogLevel::LOG_MSG));
+ }
+
+ if (vm.count("log")) {
+ std::string logfilename = vm["log"].as<std::string>();
+ logfile.open(logfilename);
+ if (!logfile.is_open())
+ log_error("Failed to open log file '%s' for writing.\n", logfilename.c_str());
+ log_streams.push_back(std::make_pair(&logfile, LogLevel::LOG_MSG));
+ }
+ return false;
+}
+
+po::options_description CommandHandler::getGeneralOptions()
+{
+ po::options_description general("General options");
+ general.add_options()("help,h", "show help");
+ general.add_options()("verbose,v", "verbose output");
+ general.add_options()("quiet,q", "quiet mode, only errors and warnings displayed");
+ general.add_options()("log,l", po::value<std::string>(),
+ "log file, all log messages are written to this file regardless of -q");
+ general.add_options()("debug", "debug output");
+ general.add_options()("debug-placer", "debug output from placer only");
+ general.add_options()("debug-router", "debug output from router only");
+ general.add_options()("threads", po::value<int>(), "number of threads for passes where this is configurable");
+
+ general.add_options()("force,f", "keep running after errors");
+#ifndef NO_GUI
+ general.add_options()("gui", "start gui");
+ general.add_options()("gui-no-aa", "disable anti aliasing (use together with --gui option)");
+#endif
+#ifndef NO_PYTHON
+ general.add_options()("run", po::value<std::vector<std::string>>(),
+ "python file to execute instead of default flow");
+ pos.add("run", -1);
+ general.add_options()("pre-pack", po::value<std::vector<std::string>>(), "python file to run before packing");
+ general.add_options()("pre-place", po::value<std::vector<std::string>>(), "python file to run before placement");
+ general.add_options()("pre-route", po::value<std::vector<std::string>>(), "python file to run before routing");
+ general.add_options()("post-route", po::value<std::vector<std::string>>(), "python file to run after routing");
+ general.add_options()("on-failure", po::value<std::vector<std::string>>(),
+ "python file to run in event of crash for design introspection");
+
+#endif
+ general.add_options()("json", po::value<std::string>(), "JSON design file to ingest");
+ general.add_options()("write", po::value<std::string>(), "JSON design file to write");
+ general.add_options()("top", po::value<std::string>(), "name of top module");
+ general.add_options()("seed", po::value<int>(), "seed value for random number generator");
+ general.add_options()("randomize-seed,r", "randomize seed value for random number generator");
+
+ general.add_options()(
+ "placer", po::value<std::string>(),
+ std::string("placer algorithm to use; available: " + boost::algorithm::join(Arch::availablePlacers, ", ") +
+ "; default: " + Arch::defaultPlacer)
+ .c_str());
+
+ general.add_options()(
+ "router", po::value<std::string>(),
+ std::string("router algorithm to use; available: " + boost::algorithm::join(Arch::availableRouters, ", ") +
+ "; default: " + Arch::defaultRouter)
+ .c_str());
+
+ general.add_options()("slack_redist_iter", po::value<int>(), "number of iterations between slack redistribution");
+ general.add_options()("cstrweight", po::value<float>(), "placer weighting for relative constraint satisfaction");
+ general.add_options()("starttemp", po::value<float>(), "placer SA start temperature");
+ general.add_options()("placer-budgets", "use budget rather than criticality in placer timing weights");
+
+ general.add_options()("pack-only", "pack design only without placement or routing");
+ general.add_options()("no-route", "process design without routing");
+ general.add_options()("no-place", "process design without placement");
+ general.add_options()("no-pack", "process design without packing");
+
+ general.add_options()("ignore-loops", "ignore combinational loops in timing analysis");
+
+ general.add_options()("version,V", "show version");
+ general.add_options()("test", "check architecture database integrity");
+ general.add_options()("freq", po::value<double>(), "set target frequency for design in MHz");
+ general.add_options()("timing-allow-fail", "allow timing to fail in design");
+ general.add_options()("no-tmdriv", "disable timing-driven placement");
+ general.add_options()("sdf", po::value<std::string>(), "SDF delay back-annotation file to write");
+ general.add_options()("sdf-cvc", "enable tweaks for SDF file compatibility with the CVC simulator");
+ general.add_options()("no-print-critical-path-source",
+ "disable printing of the line numbers associated with each net in the critical path");
+
+ general.add_options()("placer-heap-alpha", po::value<float>(), "placer heap alpha value (float, default: 0.1)");
+ general.add_options()("placer-heap-beta", po::value<float>(), "placer heap beta value (float, default: 0.9)");
+ general.add_options()("placer-heap-critexp", po::value<int>(),
+ "placer heap criticality exponent (int, default: 2)");
+ general.add_options()("placer-heap-timingweight", po::value<int>(), "placer heap timing weight (int, default: 10)");
+
+#if !defined(__wasm)
+ general.add_options()("parallel-refine", "use new experimental parallelised engine for placement refinement");
+#endif
+
+ general.add_options()("router2-heatmap", po::value<std::string>(),
+ "prefix for router2 resource congestion heatmaps");
+
+ general.add_options()("tmg-ripup", "enable experimental timing-driven ripup in router");
+ general.add_options()("router2-tmg-ripup",
+ "enable experimental timing-driven ripup in router (deprecated; use --tmg-ripup instead)");
+
+ general.add_options()("report", po::value<std::string>(),
+ "write timing and utilization report in JSON format to file");
+ general.add_options()("detailed-timing-report", "Append detailed net timing data to the JSON report");
+
+ general.add_options()("placed-svg", po::value<std::string>(), "write render of placement to SVG file");
+ general.add_options()("routed-svg", po::value<std::string>(), "write render of routing to SVG file");
+
+ return general;
+}
+
+namespace {
+static CommandHandler *global_command_handler = nullptr;
+void script_terminate_handler()
+{
+ if (global_command_handler != nullptr)
+ global_command_handler->run_script_hook("on-failure");
+}
+}; // namespace
+
+void CommandHandler::setupContext(Context *ctx)
+{
+ if (ctx->settings.find(ctx->id("seed")) != ctx->settings.end())
+ ctx->rngstate = ctx->setting<uint64_t>("seed");
+
+ if (vm.count("verbose")) {
+ ctx->verbose = true;
+ }
+
+ if (vm.count("debug")) {
+ ctx->verbose = true;
+ ctx->debug = true;
+ }
+
+ if (vm.count("no-print-critical-path-source")) {
+ ctx->disable_critical_path_source_print = true;
+ }
+
+ if (vm.count("force")) {
+ ctx->force = true;
+ }
+
+ if (vm.count("seed")) {
+ ctx->rngseed(vm["seed"].as<int>());
+ }
+
+ if (vm.count("threads")) {
+ ctx->settings[ctx->id("threads")] = vm["threads"].as<int>();
+ }
+
+ if (vm.count("randomize-seed")) {
+ std::random_device randDev{};
+ std::uniform_int_distribution<int> distrib{1};
+ ctx->rngseed(distrib(randDev));
+ }
+
+ if (vm.count("slack_redist_iter")) {
+ ctx->settings[ctx->id("slack_redist_iter")] = vm["slack_redist_iter"].as<int>();
+ if (vm.count("freq") && vm["freq"].as<double>() == 0) {
+ ctx->settings[ctx->id("auto_freq")] = true;
+#ifndef NO_GUI
+ if (!vm.count("gui"))
+#endif
+ log_warning("Target frequency not specified. Will optimise for max frequency.\n");
+ }
+ }
+
+ if (vm.count("ignore-loops")) {
+ ctx->settings[ctx->id("timing/ignoreLoops")] = true;
+ }
+
+ if (vm.count("timing-allow-fail")) {
+ ctx->settings[ctx->id("timing/allowFail")] = true;
+ }
+
+ if (vm.count("placer")) {
+ std::string placer = vm["placer"].as<std::string>();
+ if (std::find(Arch::availablePlacers.begin(), Arch::availablePlacers.end(), placer) ==
+ Arch::availablePlacers.end())
+ log_error("Placer algorithm '%s' is not supported (available options: %s)\n", placer.c_str(),
+ boost::algorithm::join(Arch::availablePlacers, ", ").c_str());
+ ctx->settings[ctx->id("placer")] = placer;
+ }
+
+ if (vm.count("router")) {
+ std::string router = vm["router"].as<std::string>();
+ if (std::find(Arch::availableRouters.begin(), Arch::availableRouters.end(), router) ==
+ Arch::availableRouters.end())
+ log_error("Router algorithm '%s' is not supported (available options: %s)\n", router.c_str(),
+ boost::algorithm::join(Arch::availableRouters, ", ").c_str());
+ ctx->settings[ctx->id("router")] = router;
+ }
+
+ if (vm.count("cstrweight")) {
+ ctx->settings[ctx->id("placer1/constraintWeight")] = std::to_string(vm["cstrweight"].as<float>());
+ }
+ if (vm.count("starttemp")) {
+ ctx->settings[ctx->id("placer1/startTemp")] = std::to_string(vm["starttemp"].as<float>());
+ }
+
+ if (vm.count("placer-budgets")) {
+ ctx->settings[ctx->id("placer1/budgetBased")] = true;
+ }
+ if (vm.count("freq")) {
+ auto freq = vm["freq"].as<double>();
+ if (freq > 0)
+ ctx->settings[ctx->id("target_freq")] = std::to_string(freq * 1e6);
+ }
+
+ if (vm.count("no-tmdriv"))
+ ctx->settings[ctx->id("timing_driven")] = false;
+
+ if (vm.count("placer-heap-alpha"))
+ ctx->settings[ctx->id("placerHeap/alpha")] = std::to_string(vm["placer-heap-alpha"].as<float>());
+
+ if (vm.count("placer-heap-beta"))
+ ctx->settings[ctx->id("placerHeap/beta")] = std::to_string(vm["placer-heap-beta"].as<float>());
+
+ if (vm.count("placer-heap-critexp"))
+ ctx->settings[ctx->id("placerHeap/criticalityExponent")] = std::to_string(vm["placer-heap-critexp"].as<int>());
+
+ if (vm.count("placer-heap-timingweight"))
+ ctx->settings[ctx->id("placerHeap/timingWeight")] = std::to_string(vm["placer-heap-timingweight"].as<int>());
+
+ if (vm.count("parallel-refine"))
+ ctx->settings[ctx->id("placerHeap/parallelRefine")] = true;
+
+ if (vm.count("router2-heatmap"))
+ ctx->settings[ctx->id("router2/heatmap")] = vm["router2-heatmap"].as<std::string>();
+ if (vm.count("tmg-ripup") || vm.count("router2-tmg-ripup"))
+ ctx->settings[ctx->id("router/tmg_ripup")] = true;
+
+ // Setting default values
+ if (ctx->settings.find(ctx->id("target_freq")) == ctx->settings.end())
+ ctx->settings[ctx->id("target_freq")] = std::to_string(12e6);
+ if (ctx->settings.find(ctx->id("timing_driven")) == ctx->settings.end())
+ ctx->settings[ctx->id("timing_driven")] = true;
+ if (ctx->settings.find(ctx->id("slack_redist_iter")) == ctx->settings.end())
+ ctx->settings[ctx->id("slack_redist_iter")] = 0;
+ if (ctx->settings.find(ctx->id("auto_freq")) == ctx->settings.end())
+ ctx->settings[ctx->id("auto_freq")] = false;
+ if (ctx->settings.find(ctx->id("placer")) == ctx->settings.end())
+ ctx->settings[ctx->id("placer")] = Arch::defaultPlacer;
+ if (ctx->settings.find(ctx->id("router")) == ctx->settings.end())
+ ctx->settings[ctx->id("router")] = Arch::defaultRouter;
+
+ ctx->settings[ctx->id("arch.name")] = std::string(ctx->archId().c_str(ctx));
+ ctx->settings[ctx->id("arch.type")] = std::string(ctx->archArgsToId(ctx->archArgs()).c_str(ctx));
+ ctx->settings[ctx->id("seed")] = ctx->rngstate;
+
+ if (ctx->settings.find(ctx->id("placerHeap/alpha")) == ctx->settings.end())
+ ctx->settings[ctx->id("placerHeap/alpha")] = std::to_string(0.1);
+ if (ctx->settings.find(ctx->id("placerHeap/beta")) == ctx->settings.end())
+ ctx->settings[ctx->id("placerHeap/beta")] = std::to_string(0.9);
+ if (ctx->settings.find(ctx->id("placerHeap/criticalityExponent")) == ctx->settings.end())
+ ctx->settings[ctx->id("placerHeap/criticalityExponent")] = std::to_string(2);
+ if (ctx->settings.find(ctx->id("placerHeap/timingWeight")) == ctx->settings.end())
+ ctx->settings[ctx->id("placerHeap/timingWeight")] = std::to_string(10);
+
+ if (vm.count("detailed-timing-report")) {
+ ctx->detailed_timing_report = true;
+ }
+}
+
+int CommandHandler::executeMain(std::unique_ptr<Context> ctx)
+{
+ if (vm.count("on-failure")) {
+ global_command_handler = this;
+ std::set_terminate(script_terminate_handler);
+ }
+ if (vm.count("test")) {
+ ctx->archcheck();
+ return 0;
+ }
+
+ if (vm.count("top")) {
+ ctx->settings[ctx->id("frontend/top")] = vm["top"].as<std::string>();
+ }
+
+#ifndef NO_GUI
+ if (vm.count("gui")) {
+ Application a(argc, argv, (vm.count("gui-no-aa") > 0));
+ MainWindow w(std::move(ctx), this);
+ try {
+ if (vm.count("json")) {
+ std::string filename = vm["json"].as<std::string>();
+ std::ifstream f(filename);
+ if (!parse_json(f, filename, w.getContext()))
+ log_error("Loading design failed.\n");
+ customAfterLoad(w.getContext());
+ w.notifyChangeContext();
+ w.updateActions();
+ } else
+ w.notifyChangeContext();
+ } catch (log_execution_error_exception) {
+ // show error is handled by gui itself
+ }
+ w.show();
+
+ return a.exec();
+ }
+#endif
+ if (vm.count("json")) {
+ std::string filename = vm["json"].as<std::string>();
+ std::ifstream f(filename);
+ if (!parse_json(f, filename, ctx.get()))
+ log_error("Loading design failed.\n");
+
+ customAfterLoad(ctx.get());
+ }
+
+#ifndef NO_PYTHON
+ init_python(argv[0]);
+ python_export_global("ctx", *ctx);
+
+ if (vm.count("run")) {
+
+ std::vector<std::string> files = vm["run"].as<std::vector<std::string>>();
+ for (auto filename : files)
+ execute_python_file(filename.c_str());
+ } else
+#endif
+ if (ctx->design_loaded) {
+ bool do_pack = vm.count("pack-only") != 0 || vm.count("no-pack") == 0;
+ bool do_place = vm.count("pack-only") == 0 && vm.count("no-place") == 0;
+ bool do_route = vm.count("pack-only") == 0 && vm.count("no-route") == 0;
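+        // --pack-only runs packing alone; --no-pack, --no-place and --no-route each skip just that stage.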
+
+ if (do_pack) {
+ run_script_hook("pre-pack");
+ if (!ctx->pack() && !ctx->force)
+ log_error("Packing design failed.\n");
+ }
+ assign_budget(ctx.get());
+ ctx->check();
+ print_utilisation(ctx.get());
+
+ if (do_place) {
+ run_script_hook("pre-place");
+ bool saved_debug = ctx->debug;
+ if (vm.count("debug-placer"))
+ ctx->debug = true;
+ if (!ctx->place() && !ctx->force)
+ log_error("Placing design failed.\n");
+ ctx->debug = saved_debug;
+ ctx->check();
+ if (vm.count("placed-svg"))
+ ctx->writeSVG(vm["placed-svg"].as<std::string>(), "scale=50 hide_routing");
+ }
+
+ if (do_route) {
+ run_script_hook("pre-route");
+ bool saved_debug = ctx->debug;
+ if (vm.count("debug-router"))
+ ctx->debug = true;
+ if (!ctx->route() && !ctx->force)
+ log_error("Routing design failed.\n");
+ ctx->debug = saved_debug;
+ run_script_hook("post-route");
+ if (vm.count("routed-svg"))
+ ctx->writeSVG(vm["routed-svg"].as<std::string>(), "scale=500");
+ }
+
+ customBitstream(ctx.get());
+ }
+
+ if (vm.count("write")) {
+ std::string filename = vm["write"].as<std::string>();
+ std::ofstream f(filename);
+ if (!write_json_file(f, filename, ctx.get()))
+ log_error("Saving design failed.\n");
+ }
+
+ if (vm.count("sdf")) {
+ std::string filename = vm["sdf"].as<std::string>();
+ std::ofstream f(filename);
+ if (!f)
+ log_error("Failed to open SDF file '%s' for writing.\n", filename.c_str());
+ ctx->writeSDF(f, vm.count("sdf-cvc"));
+ }
+
+ if (vm.count("report")) {
+ std::string filename = vm["report"].as<std::string>();
+ std::ofstream f(filename);
+ if (!f)
+ log_error("Failed to open report file '%s' for writing.\n", filename.c_str());
+ ctx->writeReport(f);
+ }
+
+#ifndef NO_PYTHON
+ deinit_python();
+#endif
+
+ return had_nonfatal_error ? 1 : 0;
+}
+
+void CommandHandler::conflicting_options(const boost::program_options::variables_map &vm, const char *opt1,
+ const char *opt2)
+{
+ if (vm.count(opt1) && !vm[opt1].defaulted() && vm.count(opt2) && !vm[opt2].defaulted()) {
+ std::string msg = "Conflicting options '" + std::string(opt1) + "' and '" + std::string(opt2) + "'.";
+ log_error("%s\n", msg.c_str());
+ }
+}
+
+void CommandHandler::printFooter()
+{
+ int warning_count = get_or_default(message_count_by_level, LogLevel::WARNING_MSG, 0),
+ error_count = get_or_default(message_count_by_level, LogLevel::ERROR_MSG, 0);
+ if (warning_count > 0 || error_count > 0)
+ log_always("%d warning%s, %d error%s\n", warning_count, warning_count == 1 ? "" : "s", error_count,
+ error_count == 1 ? "" : "s");
+}
+
+int CommandHandler::exec()
+{
+ try {
+ if (!parseOptions())
+ return -1;
+
+ if (executeBeforeContext())
+ return 0;
+
+ dict<std::string, Property> values;
+ std::unique_ptr<Context> ctx = createContext(values);
+ setupContext(ctx.get());
+ setupArchContext(ctx.get());
+ int rc = executeMain(std::move(ctx));
+ printFooter();
+ log_break();
+ log_info("Program finished normally.\n");
+ return rc;
+ } catch (log_execution_error_exception) {
+ printFooter();
+ return -1;
+ }
+}
+
+void CommandHandler::load_json(Context *ctx, std::string filename)
+{
+ setupContext(ctx);
+ setupArchContext(ctx);
+ {
+ std::ifstream f(filename);
+ if (!parse_json(f, filename, ctx))
+ log_error("Loading design failed.\n");
+ }
+}
+
+void CommandHandler::clear() { vm.clear(); }
+
+void CommandHandler::run_script_hook(const std::string &name)
+{
+#ifndef NO_PYTHON
+ if (vm.count(name)) {
+ std::vector<std::string> files = vm[name].as<std::vector<std::string>>();
+ for (auto filename : files)
+ execute_python_file(filename.c_str());
+ }
+#endif
+}
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/command.h b/common/kernel/command.h
new file mode 100644
index 00000000..6cce8c61
--- /dev/null
+++ b/common/kernel/command.h
@@ -0,0 +1,74 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Miodrag Milanovic <micko@yosyshq.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef COMMAND_H
+#define COMMAND_H
+
+#include <boost/program_options.hpp>
+#include <fstream>
+#include "log.h"
+#include "nextpnr.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+namespace po = boost::program_options;
+
+class CommandHandler
+{
+ public:
+ CommandHandler(int argc, char **argv);
+ virtual ~CommandHandler(){};
+
+ int exec();
+ void load_json(Context *ctx, std::string filename);
+ void clear();
+ void run_script_hook(const std::string &name);
+
+ protected:
+ virtual void setupArchContext(Context *ctx) = 0;
+ virtual std::unique_ptr<Context> createContext(dict<std::string, Property> &values) = 0;
+ virtual po::options_description getArchOptions() = 0;
+ virtual void validate(){};
+ virtual void customAfterLoad(Context *ctx){};
+ virtual void customBitstream(Context *ctx){};
+ void conflicting_options(const boost::program_options::variables_map &vm, const char *opt1, const char *opt2);
+
+ private:
+ bool parseOptions();
+ bool executeBeforeContext();
+ void setupContext(Context *ctx);
+ int executeMain(std::unique_ptr<Context> ctx);
+ po::options_description getGeneralOptions();
+ void printFooter();
+
+ protected:
+ po::variables_map vm;
+
+ private:
+ po::options_description options;
+ po::positional_options_description pos;
+ int argc;
+ char **argv;
+ std::ofstream logfile;
+};
+
+NEXTPNR_NAMESPACE_END
+
+#endif // COMMAND_H
diff --git a/common/kernel/constraints.h b/common/kernel/constraints.h
new file mode 100644
index 00000000..65abf12c
--- /dev/null
+++ b/common/kernel/constraints.h
@@ -0,0 +1,70 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2021 The SymbiFlow Authors.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef CONSTRAINTS_H
+#define CONSTRAINTS_H
+
+#include <cstdint>
+#include <vector>
+
+#include "archdefs.h"
+#include "exclusive_state_groups.h"
+#include "hashlib.h"
+#include "idstring.h"
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+struct Context;
+
+template <std::size_t StateCount, typename StateType = int8_t, typename CountType = uint8_t> struct Constraints
+{
+ using ConstraintStateType = StateType;
+ using ConstraintCountType = CountType;
+
+ enum ConstraintType
+ {
+ CONSTRAINT_TAG_IMPLIES = 0,
+ CONSTRAINT_TAG_REQUIRES = 1,
+ };
+
+ template <typename StateRange> struct Constraint
+ {
+ virtual std::size_t tag() const = 0;
+ virtual ConstraintType constraint_type() const = 0;
+ virtual StateType state() const = 0;
+ virtual StateRange states() const = 0;
+ };
+
+ typedef ExclusiveStateGroup<StateCount, StateType, CountType> TagState;
+ dict<uint32_t, std::vector<typename TagState::Definition>> definitions;
+
+ template <typename ConstraintRange> void bindBel(TagState *tags, const ConstraintRange constraints);
+
+ template <typename ConstraintRange> void unbindBel(TagState *tags, const ConstraintRange constraints);
+
+ template <typename ConstraintRange>
+ bool isValidBelForCellType(const Context *ctx, uint32_t prototype, const TagState *tags,
+ const ConstraintRange constraints, IdString object, IdString cell, BelId bel,
+ bool explain_constraints) const;
+};
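+
+// Illustrative example (hypothetical tag and states, not part of this header): binding a LUT cell in
+// memory mode could emit CONSTRAINT_TAG_IMPLIES on a per-slice MODE tag with state MEMORY, while a
+// cell that only coexists with logic-mode LUTs could emit CONSTRAINT_TAG_REQUIRES on the same tag
+// with states {LOGIC}; isValidBelForCellType() checks both kinds against the current TagState.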
+
+NEXTPNR_NAMESPACE_END
+
+#endif
diff --git a/common/kernel/constraints.impl.h b/common/kernel/constraints.impl.h
new file mode 100644
index 00000000..9c978411
--- /dev/null
+++ b/common/kernel/constraints.impl.h
@@ -0,0 +1,109 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2021 The SymbiFlow Authors.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef CONSTRAINTS_IMPL_H
+#define CONSTRAINTS_IMPL_H
+
+#include "exclusive_state_groups.impl.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+template <size_t StateCount, typename StateType, typename CountType>
+template <typename ConstraintRange>
+void Constraints<StateCount, StateType, CountType>::bindBel(TagState *tags, const ConstraintRange constraints)
+{
+ for (const auto &constraint : constraints) {
+ switch (constraint.constraint_type()) {
+ case CONSTRAINT_TAG_IMPLIES:
+ tags[constraint.tag()].add_implies(constraint.state());
+ break;
+ case CONSTRAINT_TAG_REQUIRES:
+ break;
+ default:
+ NPNR_ASSERT(false);
+ }
+ }
+}
+
+template <size_t StateCount, typename StateType, typename CountType>
+template <typename ConstraintRange>
+void Constraints<StateCount, StateType, CountType>::unbindBel(TagState *tags, const ConstraintRange constraints)
+{
+ for (const auto &constraint : constraints) {
+ switch (constraint.constraint_type()) {
+ case CONSTRAINT_TAG_IMPLIES:
+ tags[constraint.tag()].remove_implies(constraint.state());
+ break;
+ case CONSTRAINT_TAG_REQUIRES:
+ break;
+ default:
+ NPNR_ASSERT(false);
+ }
+ }
+}
+
+template <size_t StateCount, typename StateType, typename CountType>
+template <typename ConstraintRange>
+bool Constraints<StateCount, StateType, CountType>::isValidBelForCellType(const Context *ctx, uint32_t prototype,
+ const TagState *tags,
+ const ConstraintRange constraints,
+ IdString object, IdString cell, BelId bel,
+ bool explain_constraints) const
+{
+ if (explain_constraints) {
+ auto &state_definition = definitions.at(prototype);
+ for (const auto &constraint : constraints) {
+ switch (constraint.constraint_type()) {
+ case CONSTRAINT_TAG_IMPLIES:
+ tags[constraint.tag()].explain_implies(ctx, object, cell, state_definition.at(constraint.tag()), bel,
+ constraint.state());
+ break;
+ case CONSTRAINT_TAG_REQUIRES:
+ tags[constraint.tag()].explain_requires(ctx, object, cell, state_definition.at(constraint.tag()), bel,
+ constraint.states());
+ break;
+ default:
+ NPNR_ASSERT(false);
+ }
+ }
+ }
+
+ for (const auto &constraint : constraints) {
+ switch (constraint.constraint_type()) {
+ case CONSTRAINT_TAG_IMPLIES:
+ if (!tags[constraint.tag()].check_implies(constraint.state())) {
+ return false;
+ }
+ break;
+ case CONSTRAINT_TAG_REQUIRES:
+ if (!tags[constraint.tag()].requires(constraint.states())) {
+ return false;
+ }
+ break;
+ default:
+ NPNR_ASSERT(false);
+ }
+ }
+
+ return true;
+}
+
+NEXTPNR_NAMESPACE_END
+
+#endif
diff --git a/common/kernel/context.cc b/common/kernel/context.cc
new file mode 100644
index 00000000..e35d3e49
--- /dev/null
+++ b/common/kernel/context.cc
@@ -0,0 +1,428 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "context.h"
+
+#include "log.h"
+#include "nextpnr_namespaces.h"
+#include "util.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+WireId Context::getNetinfoSourceWire(const NetInfo *net_info) const
+{
+ if (net_info->driver.cell == nullptr)
+ return WireId();
+
+ auto src_bel = net_info->driver.cell->bel;
+
+ if (src_bel == BelId())
+ return WireId();
+
+ auto bel_pins = getBelPinsForCellPin(net_info->driver.cell, net_info->driver.port);
+ auto iter = bel_pins.begin();
+ if (iter == bel_pins.end())
+ return WireId();
+ WireId driver = getBelPinWire(src_bel, *iter);
+ ++iter;
+    NPNR_ASSERT(iter == bel_pins.end()); // assert there is only one driver bel pin
+ return driver;
+}
+
+SSOArray<WireId, 2> Context::getNetinfoSinkWires(const NetInfo *net_info, const PortRef &user_info) const
+{
+ auto dst_bel = user_info.cell->bel;
+ if (dst_bel == BelId())
+ return SSOArray<WireId, 2>(0, WireId());
+ size_t bel_pin_count = 0;
+ // We use an SSOArray here because it avoids any heap allocation for the 99.9% case of 1 or 2 sink wires
+ // but as SSOArray doesn't (currently) support resizing to keep things simple it does mean we have to do
+ // two loops
+ for (auto s : getBelPinsForCellPin(user_info.cell, user_info.port)) {
+ (void)s; // unused
+ ++bel_pin_count;
+ }
+ SSOArray<WireId, 2> result(bel_pin_count, WireId());
+ bel_pin_count = 0;
+ for (auto pin : getBelPinsForCellPin(user_info.cell, user_info.port)) {
+ result[bel_pin_count++] = getBelPinWire(dst_bel, pin);
+ }
+ return result;
+}
+
+size_t Context::getNetinfoSinkWireCount(const NetInfo *net_info, const PortRef &sink) const
+{
+ size_t count = 0;
+ for (auto s : getNetinfoSinkWires(net_info, sink)) {
+ (void)s; // unused
+ ++count;
+ }
+ return count;
+}
+
+WireId Context::getNetinfoSinkWire(const NetInfo *net_info, const PortRef &sink, size_t phys_idx) const
+{
+ size_t count = 0;
+ for (auto s : getNetinfoSinkWires(net_info, sink)) {
+ if (count == phys_idx)
+ return s;
+ ++count;
+ }
+    /* TODO: This should be an assertion failure, but legacy code currently relies on an empty WireId
+       being returned for the zero-wire case of unplaced sinks. Remove once the refactoring process is
+       complete. */
+ return WireId();
+}
+
+delay_t Context::predictArcDelay(const NetInfo *net_info, const PortRef &sink) const
+{
+ if (net_info->driver.cell == nullptr || net_info->driver.cell->bel == BelId() || sink.cell->bel == BelId())
+ return 0;
+ IdString driver_pin, sink_pin;
+    // Pick the first pin for a prediction; assume all will be similar enough
+ for (auto pin : getBelPinsForCellPin(net_info->driver.cell, net_info->driver.port)) {
+ driver_pin = pin;
+ break;
+ }
+ for (auto pin : getBelPinsForCellPin(sink.cell, sink.port)) {
+ sink_pin = pin;
+ break;
+ }
+ if (driver_pin == IdString() || sink_pin == IdString())
+ return 0;
+ return predictDelay(net_info->driver.cell->bel, driver_pin, sink.cell->bel, sink_pin);
+}
+
+delay_t Context::getNetinfoRouteDelay(const NetInfo *net_info, const PortRef &user_info) const
+{
+#ifdef ARCH_ECP5
+ if (net_info->is_global)
+ return 0;
+#endif
+
+ if (net_info->wires.empty())
+ return predictArcDelay(net_info, user_info);
+
+ WireId src_wire = getNetinfoSourceWire(net_info);
+ if (src_wire == WireId())
+ return 0;
+
+ delay_t max_delay = 0;
+
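+    // For each sink wire, walk backwards towards the source along the pips bound to this net,
+    // accumulating pip and wire delays; if the walk does not reach the source wire, the arc is
+    // treated as unrouted and the predicted delay is used instead.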
+ for (auto dst_wire : getNetinfoSinkWires(net_info, user_info)) {
+ WireId cursor = dst_wire;
+ delay_t delay = 0;
+
+ while (cursor != WireId() && cursor != src_wire) {
+ auto it = net_info->wires.find(cursor);
+
+ if (it == net_info->wires.end())
+ break;
+
+ PipId pip = it->second.pip;
+ if (pip == PipId())
+ break;
+
+ delay += getPipDelay(pip).maxDelay();
+ delay += getWireDelay(cursor).maxDelay();
+ cursor = getPipSrcWire(pip);
+ }
+
+ if (cursor == src_wire)
+ max_delay = std::max(max_delay, delay + getWireDelay(src_wire).maxDelay()); // routed
+ else
+ max_delay = std::max(max_delay, predictArcDelay(net_info, user_info)); // unrouted
+ }
+ return max_delay;
+}
+
+static uint32_t xorshift32(uint32_t x)
+{
+ x ^= x << 13;
+ x ^= x >> 17;
+ x ^= x << 5;
+ return x;
+}
+
+uint32_t Context::checksum() const
+{
+ uint32_t cksum = xorshift32(123456789);
+
+ uint32_t cksum_nets_sum = 0;
+ for (auto &it : nets) {
+ auto &ni = *it.second;
+ uint32_t x = 123456789;
+ x = xorshift32(x + xorshift32(it.first.index));
+ x = xorshift32(x + xorshift32(ni.name.index));
+ if (ni.driver.cell)
+ x = xorshift32(x + xorshift32(ni.driver.cell->name.index));
+ x = xorshift32(x + xorshift32(ni.driver.port.index));
+ x = xorshift32(x + xorshift32(getDelayChecksum(ni.driver.budget)));
+
+ for (auto &u : ni.users) {
+ if (u.cell)
+ x = xorshift32(x + xorshift32(u.cell->name.index));
+ x = xorshift32(x + xorshift32(u.port.index));
+ x = xorshift32(x + xorshift32(getDelayChecksum(u.budget)));
+ }
+
+ uint32_t attr_x_sum = 0;
+ for (auto &a : ni.attrs) {
+ uint32_t attr_x = 123456789;
+ attr_x = xorshift32(attr_x + xorshift32(a.first.index));
+ for (char ch : a.second.str)
+ attr_x = xorshift32(attr_x + xorshift32((int)ch));
+ attr_x_sum += attr_x;
+ }
+ x = xorshift32(x + xorshift32(attr_x_sum));
+
+ uint32_t wire_x_sum = 0;
+ for (auto &w : ni.wires) {
+ uint32_t wire_x = 123456789;
+ wire_x = xorshift32(wire_x + xorshift32(getWireChecksum(w.first)));
+ wire_x = xorshift32(wire_x + xorshift32(getPipChecksum(w.second.pip)));
+ wire_x = xorshift32(wire_x + xorshift32(int(w.second.strength)));
+ wire_x_sum += wire_x;
+ }
+ x = xorshift32(x + xorshift32(wire_x_sum));
+
+ cksum_nets_sum += x;
+ }
+ cksum = xorshift32(cksum + xorshift32(cksum_nets_sum));
+
+ uint32_t cksum_cells_sum = 0;
+ for (auto &it : cells) {
+ auto &ci = *it.second;
+ uint32_t x = 123456789;
+ x = xorshift32(x + xorshift32(it.first.index));
+ x = xorshift32(x + xorshift32(ci.name.index));
+ x = xorshift32(x + xorshift32(ci.type.index));
+
+ uint32_t port_x_sum = 0;
+ for (auto &p : ci.ports) {
+ uint32_t port_x = 123456789;
+ port_x = xorshift32(port_x + xorshift32(p.first.index));
+ port_x = xorshift32(port_x + xorshift32(p.second.name.index));
+ if (p.second.net)
+ port_x = xorshift32(port_x + xorshift32(p.second.net->name.index));
+ port_x = xorshift32(port_x + xorshift32(p.second.type));
+ port_x_sum += port_x;
+ }
+ x = xorshift32(x + xorshift32(port_x_sum));
+
+ uint32_t attr_x_sum = 0;
+ for (auto &a : ci.attrs) {
+ uint32_t attr_x = 123456789;
+ attr_x = xorshift32(attr_x + xorshift32(a.first.index));
+ for (char ch : a.second.str)
+ attr_x = xorshift32(attr_x + xorshift32((int)ch));
+ attr_x_sum += attr_x;
+ }
+ x = xorshift32(x + xorshift32(attr_x_sum));
+
+ uint32_t param_x_sum = 0;
+ for (auto &p : ci.params) {
+ uint32_t param_x = 123456789;
+ param_x = xorshift32(param_x + xorshift32(p.first.index));
+ for (char ch : p.second.str)
+ param_x = xorshift32(param_x + xorshift32((int)ch));
+ param_x_sum += param_x;
+ }
+ x = xorshift32(x + xorshift32(param_x_sum));
+
+ x = xorshift32(x + xorshift32(getBelChecksum(ci.bel)));
+ x = xorshift32(x + xorshift32(ci.belStrength));
+
+ cksum_cells_sum += x;
+ }
+ cksum = xorshift32(cksum + xorshift32(cksum_cells_sum));
+
+ return cksum;
+}
+
+void Context::check() const
+{
+ bool check_failed = false;
+
+#define CHECK_FAIL(...) \
+ do { \
+ log_nonfatal_error(__VA_ARGS__); \
+ check_failed = true; \
+ } while (false)
+
+ for (auto &n : nets) {
+ auto ni = n.second.get();
+ if (n.first != ni->name)
+ CHECK_FAIL("net key '%s' not equal to name '%s'\n", nameOf(n.first), nameOf(ni->name));
+ for (auto &w : ni->wires) {
+ if (ni != getBoundWireNet(w.first))
+ CHECK_FAIL("net '%s' not bound to wire '%s' in wires map\n", nameOf(n.first), nameOfWire(w.first));
+ if (w.second.pip != PipId()) {
+ if (w.first != getPipDstWire(w.second.pip))
+ CHECK_FAIL("net '%s' has dest mismatch '%s' vs '%s' in for pip '%s'\n", nameOf(n.first),
+ nameOfWire(w.first), nameOfWire(getPipDstWire(w.second.pip)), nameOfPip(w.second.pip));
+ if (ni != getBoundPipNet(w.second.pip))
+ CHECK_FAIL("net '%s' not bound to pip '%s' in wires map\n", nameOf(n.first),
+ nameOfPip(w.second.pip));
+ }
+ }
+ if (ni->driver.cell != nullptr) {
+ if (!ni->driver.cell->ports.count(ni->driver.port)) {
+ CHECK_FAIL("net '%s' driver port '%s' missing on cell '%s'\n", nameOf(n.first), nameOf(ni->driver.port),
+ nameOf(ni->driver.cell));
+ } else {
+ const NetInfo *p_net = ni->driver.cell->ports.at(ni->driver.port).net;
+ if (p_net != ni)
+ CHECK_FAIL("net '%s' driver port '%s.%s' connected to incorrect net '%s'\n", nameOf(n.first),
+ nameOf(ni->driver.cell), nameOf(ni->driver.port), p_net ? nameOf(p_net) : "<nullptr>");
+ }
+ }
+ for (auto user : ni->users) {
+ if (!user.cell->ports.count(user.port)) {
+ CHECK_FAIL("net '%s' user port '%s' missing on cell '%s'\n", nameOf(n.first), nameOf(user.port),
+ nameOf(user.cell));
+ } else {
+ const NetInfo *p_net = user.cell->ports.at(user.port).net;
+ if (p_net != ni)
+ CHECK_FAIL("net '%s' user port '%s.%s' connected to incorrect net '%s'\n", nameOf(n.first),
+ nameOf(user.cell), nameOf(user.port), p_net ? nameOf(p_net) : "<nullptr>");
+ }
+ }
+ }
+#ifdef CHECK_WIRES
+ for (auto w : getWires()) {
+ auto ni = getBoundWireNet(w);
+ if (ni != nullptr) {
+ if (!ni->wires.count(w))
+ CHECK_FAIL("wire '%s' missing in wires map of bound net '%s'\n", nameOfWire(w), nameOf(ni));
+ }
+ }
+#endif
+ for (auto &c : cells) {
+ auto ci = c.second.get();
+ if (c.first != ci->name)
+ CHECK_FAIL("cell key '%s' not equal to name '%s'\n", nameOf(c.first), nameOf(ci->name));
+ if (ci->bel != BelId()) {
+ if (getBoundBelCell(c.second->bel) != ci)
+ CHECK_FAIL("cell '%s' not bound to bel '%s' in bel field\n", nameOf(c.first), nameOfBel(ci->bel));
+ }
+ for (auto &port : c.second->ports) {
+ NetInfo *net = port.second.net;
+ if (net != nullptr) {
+ if (nets.find(net->name) == nets.end()) {
+ CHECK_FAIL("cell port '%s.%s' connected to non-existent net '%s'\n", nameOf(c.first),
+ nameOf(port.first), nameOf(net->name));
+ } else if (port.second.type == PORT_OUT) {
+ if (net->driver.cell != c.second.get() || net->driver.port != port.first) {
+ CHECK_FAIL("output cell port '%s.%s' not in driver field of net '%s'\n", nameOf(c.first),
+ nameOf(port.first), nameOf(net));
+ }
+ } else if (port.second.type == PORT_IN) {
+ if (!port.second.user_idx)
+ CHECK_FAIL("input cell port '%s.%s' on net '%s' has no user index\n", nameOf(c.first),
+ nameOf(port.first), nameOf(net));
+ auto net_user = net->users.at(port.second.user_idx);
+ if (net_user.cell != c.second.get() || net_user.port != port.first)
+ CHECK_FAIL("input cell port '%s.%s' not in associated user entry of net '%s'\n",
+ nameOf(c.first), nameOf(port.first), nameOf(net));
+ }
+ }
+ }
+ }
+
+#undef CHECK_FAIL
+
+ if (check_failed)
+ log_error("INTERNAL CHECK FAILED: please report this error with the design and full log output. Failure "
+ "details are above this message.\n");
+}
+
+namespace {
+struct FixupHierarchyWorker
+{
+ FixupHierarchyWorker(Context *ctx) : ctx(ctx){};
+ Context *ctx;
+ void run()
+ {
+ trim_hierarchy(ctx->top_module);
+ rebuild_hierarchy();
+ };
+ // Remove cells and nets that no longer exist in the netlist
+ std::vector<IdString> todelete_cells, todelete_nets;
+ void trim_hierarchy(IdString path)
+ {
+ auto &h = ctx->hierarchy.at(path);
+ todelete_cells.clear();
+ todelete_nets.clear();
+ for (auto &lc : h.leaf_cells) {
+ if (!ctx->cells.count(lc.second))
+ todelete_cells.push_back(lc.first);
+ }
+ for (auto &n : h.nets)
+ if (!ctx->nets.count(n.second))
+ todelete_nets.push_back(n.first);
+ for (auto tdc : todelete_cells) {
+ h.leaf_cells_by_gname.erase(h.leaf_cells.at(tdc));
+ h.leaf_cells.erase(tdc);
+ }
+ for (auto tdn : todelete_nets) {
+ h.nets_by_gname.erase(h.nets.at(tdn));
+ h.nets.erase(tdn);
+ }
+ for (auto &sc : h.hier_cells)
+ trim_hierarchy(sc.second);
+ }
+
+ IdString construct_local_name(HierarchicalCell &hc, IdString global_name, bool is_cell)
+ {
+ std::string gn = global_name.str(ctx);
+ auto dp = gn.find_last_of('.');
+ if (dp != std::string::npos)
+ gn = gn.substr(dp + 1);
+ IdString name = ctx->id(gn);
+ // Make sure name is unique
+ int adder = 0;
+ while (is_cell ? hc.leaf_cells.count(name) : hc.nets.count(name)) {
+ ++adder;
+ name = ctx->id(gn + "$" + std::to_string(adder));
+ }
+ return name;
+ }
+
+ // Update the hierarchy structure for cells with hierpath set, defaulting missing paths to the top module
+ void rebuild_hierarchy()
+ {
+ for (auto &cell : ctx->cells) {
+ CellInfo *ci = cell.second.get();
+ if (ci->hierpath == IdString())
+ ci->hierpath = ctx->top_module;
+ auto &hc = ctx->hierarchy.at(ci->hierpath);
+ if (hc.leaf_cells_by_gname.count(ci->name))
+ continue; // already known
+ IdString local_name = construct_local_name(hc, ci->name, true);
+ hc.leaf_cells_by_gname[ci->name] = local_name;
+ hc.leaf_cells[local_name] = ci->name;
+ }
+ }
+};
+} // namespace
+
+void Context::fixupHierarchy() { FixupHierarchyWorker(this).run(); }
+
+NEXTPNR_NAMESPACE_END
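
As a usage sketch (not part of this diff): fixupHierarchy() is intended to be called after a pass has deleted or renamed cells or nets, and check() can be re-run afterwards to confirm the netlist invariants still hold. Names below are illustrative.

    // minimal sketch, assuming an in-tree pass holding a valid Context *ctx
    void after_netlist_edit(Context *ctx)
    {
        ctx->fixupHierarchy(); // drop hierarchy references to vanished cells/nets
        if (ctx->debug)
            ctx->check();      // re-verify net/cell cross-references
    }
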
diff --git a/common/kernel/context.h b/common/kernel/context.h
new file mode 100644
index 00000000..cb8fd257
--- /dev/null
+++ b/common/kernel/context.h
@@ -0,0 +1,119 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef CONTEXT_H
+#define CONTEXT_H
+
+#include <boost/lexical_cast.hpp>
+
+#include "arch.h"
+#include "deterministic_rng.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+struct Context : Arch, DeterministicRNG
+{
+ bool verbose = false;
+ bool debug = false;
+ bool force = false;
+
+ // Should we disable printing of the location of nets in the critical path?
+ bool disable_critical_path_source_print = false;
+ // True when detailed per-net timing is to be stored / reported
+ bool detailed_timing_report = false;
+
+ ArchArgs arch_args;
+
+ Context(ArchArgs args) : Arch(args)
+ {
+ BaseCtx::as_ctx = this;
+ arch_args = args;
+ }
+
+ ArchArgs getArchArgs() { return arch_args; }
+
+ // --------------------------------------------------------------
+
+ delay_t predictArcDelay(const NetInfo *net_info, const PortRef &sink) const;
+
+ WireId getNetinfoSourceWire(const NetInfo *net_info) const;
+ SSOArray<WireId, 2> getNetinfoSinkWires(const NetInfo *net_info, const PortRef &sink) const;
+ size_t getNetinfoSinkWireCount(const NetInfo *net_info, const PortRef &sink) const;
+ WireId getNetinfoSinkWire(const NetInfo *net_info, const PortRef &sink, size_t phys_idx) const;
+ delay_t getNetinfoRouteDelay(const NetInfo *net_info, const PortRef &sink) const;
+
+ // provided by router1.cc
+ bool checkRoutedDesign() const;
+ bool getActualRouteDelay(WireId src_wire, WireId dst_wire, delay_t *delay = nullptr,
+ dict<WireId, PipId> *route = nullptr, bool useEstimate = true);
+
+ // --------------------------------------------------------------
+ // call after changing hierpath or adding/removing nets and cells
+ void fixupHierarchy();
+
+ // --------------------------------------------------------------
+
+ // provided by sdf.cc
+ void writeSDF(std::ostream &out, bool cvc_mode = false) const;
+
+ // --------------------------------------------------------------
+
+ // provided by svg.cc
+ void writeSVG(const std::string &filename, const std::string &flags = "") const;
+
+ // --------------------------------------------------------------
+
+ // provided by report.cc
+ void writeReport(std::ostream &out) const;
+ // --------------------------------------------------------------
+
+ uint32_t checksum() const;
+
+ void check() const;
+ void archcheck() const;
+
+ template <typename T> T setting(const char *name, T defaultValue)
+ {
+ IdString new_id = id(name);
+ auto found = settings.find(new_id);
+ if (found != settings.end())
+ return boost::lexical_cast<T>(found->second.is_string ? found->second.as_string()
+ : std::to_string(found->second.as_int64()));
+ else
+ settings[id(name)] = std::to_string(defaultValue);
+
+ return defaultValue;
+ }
+
+ template <typename T> T setting(const char *name) const
+ {
+ IdString new_id = id(name);
+ auto found = settings.find(new_id);
+ if (found != settings.end())
+ return boost::lexical_cast<T>(found->second.is_string ? found->second.as_string()
+ : std::to_string(found->second.as_int64()));
+ else
+ throw std::runtime_error("settings does not exists");
+ }
+};
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* CONTEXT_H */
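
A usage note on the two setting() templates above: the non-const overload stores the default under the key and returns it when the key is missing, while the const overload throws. A minimal sketch (not part of this diff; key names are illustrative):

    void read_options(Context *ctx)
    {
        // stores "1" under "seed" and returns 1 if the key was never set
        int seed = ctx->setting<int>("seed", 1);
        // const overload: throws std::runtime_error if "placer_effort" was never set
        const Context *cctx = ctx;
        int effort = cctx->setting<int>("placer_effort");
        (void)seed;
        (void)effort;
    }
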
diff --git a/common/kernel/design_utils.cc b/common/kernel/design_utils.cc
new file mode 100644
index 00000000..f52cc304
--- /dev/null
+++ b/common/kernel/design_utils.cc
@@ -0,0 +1,52 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 gatecat <gatecat@ds0.me>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "design_utils.h"
+#include <algorithm>
+#include <map>
+#include "log.h"
+#include "util.h"
+NEXTPNR_NAMESPACE_BEGIN
+
+// Print utilisation of a design
+void print_utilisation(const Context *ctx)
+{
+ // Sort by Bel type
+ std::map<IdString, int> used_types;
+ for (auto &cell : ctx->cells) {
+ used_types[ctx->getBelBucketName(ctx->getBelBucketForCellType(cell.second.get()->type))]++;
+ }
+ std::map<IdString, int> available_types;
+ for (auto bel : ctx->getBels()) {
+ if (!ctx->getBelHidden(bel)) {
+ available_types[ctx->getBelBucketName(ctx->getBelBucketForBel(bel))]++;
+ }
+ }
+ log_break();
+ log_info("Device utilisation:\n");
+ for (auto type : available_types) {
+ IdString type_id = type.first;
+ int used_bels = get_or_default(used_types, type.first, 0);
+ log_info("\t%20s: %5d/%5d %5d%%\n", type_id.c_str(ctx), used_bels, type.second, 100 * used_bels / type.second);
+ }
+ log_break();
+}
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/design_utils.h b/common/kernel/design_utils.h
new file mode 100644
index 00000000..069600b5
--- /dev/null
+++ b/common/kernel/design_utils.h
@@ -0,0 +1,100 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 gatecat <gatecat@ds0.me>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "nextpnr.h"
+
+#ifndef DESIGN_UTILS_H
+#define DESIGN_UTILS_H
+
+#include <algorithm>
+
+NEXTPNR_NAMESPACE_BEGIN
+
+/*
+Utilities for design manipulation, intended for use inside packing algorithms
+ */
+
+// Move the connection (if any) from port old_name of old_cell to port rep_name of rep_cell
+void replace_port(CellInfo *old_cell, IdString old_name, CellInfo *rep_cell, IdString rep_name);
+
+// If a net drives a given port of a cell matching a predicate (a predicate is
+// used because several cell types may match, e.g. the SB_DFFxx family), return
+// the first such cell (otherwise nullptr). If exclusive is set to true, that
+// cell must be the only load. If exclude is set, the excluded cell is not
+// considered.
+template <typename F1>
+CellInfo *net_only_drives(const Context *ctx, NetInfo *net, F1 cell_pred, IdString port, bool exclusive = false,
+ CellInfo *exclude = nullptr)
+{
+ if (net == nullptr)
+ return nullptr;
+ if (exclusive) {
+ if (exclude == nullptr) {
+ if (net->users.entries() != 1)
+ return nullptr;
+ } else {
+ if (net->users.entries() > 2) {
+ return nullptr;
+ } else if (net->users.entries() == 2) {
+ bool found = false;
+ for (auto &usr : net->users) {
+ if (usr.cell == exclude)
+ found = true;
+ }
+ if (!found)
+ return nullptr;
+ }
+ }
+ }
+ for (const auto &load : net->users) {
+ if (load.cell != exclude && cell_pred(ctx, load.cell) && load.port == port) {
+ return load.cell;
+ }
+ }
+ return nullptr;
+}
+
+// If a net is driven by a given port of a cell matching a predicate, return
+// that cell, otherwise nullptr
+template <typename F1> CellInfo *net_driven_by(const Context *ctx, const NetInfo *net, F1 cell_pred, IdString port)
+{
+ if (net == nullptr)
+ return nullptr;
+ if (net->driver.cell == nullptr)
+ return nullptr;
+ if (cell_pred(ctx, net->driver.cell) && net->driver.port == port) {
+ return net->driver.cell;
+ } else {
+ return nullptr;
+ }
+}
+
+// Check if a port is used
+inline bool port_used(CellInfo *cell, IdString port_name)
+{
+ auto port_fnd = cell->ports.find(port_name);
+ return port_fnd != cell->ports.end() && port_fnd->second.net != nullptr;
+}
+
+void print_utilisation(const Context *ctx);
+
+NEXTPNR_NAMESPACE_END
+
+#endif
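
A short sketch of how these helpers combine inside a packing pass (not part of this diff; the cell type and port names are placeholders):

    void try_pack_lut_ff(Context *ctx, NetInfo *lut_out)
    {
        auto is_ff = [](const Context *ctx, const CellInfo *cell) {
            return cell->type == ctx->id("DFF"); // placeholder cell type
        };
        // non-null only if the sole load on lut_out is a DFF 'D' input
        CellInfo *ff = net_only_drives(ctx, lut_out, is_ff, ctx->id("D"), true);
        if (ff != nullptr && port_used(ff, ctx->id("D"))) {
            // safe to fuse the LUT and FF into a single bel
        }
    }
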
diff --git a/common/kernel/deterministic_rng.h b/common/kernel/deterministic_rng.h
new file mode 100644
index 00000000..3aab5a49
--- /dev/null
+++ b/common/kernel/deterministic_rng.h
@@ -0,0 +1,103 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef DETERMINISTIC_RNG_H
+#define DETERMINISTIC_RNG_H
+
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+struct DeterministicRNG
+{
+ uint64_t rngstate;
+
+ DeterministicRNG() : rngstate(0x3141592653589793) {}
+
+ uint64_t rng64()
+ {
+ // xorshift64star
+ // https://arxiv.org/abs/1402.6246
+
+ uint64_t retval = rngstate * 0x2545F4914F6CDD1D;
+
+ rngstate ^= rngstate >> 12;
+ rngstate ^= rngstate << 25;
+ rngstate ^= rngstate >> 27;
+
+ return retval;
+ }
+
+ int rng() { return rng64() & 0x3fffffff; }
+
+ int rng(int n)
+ {
+ assert(n > 0);
+
+ // round up to power of 2
+ int m = n - 1;
+ m |= (m >> 1);
+ m |= (m >> 2);
+ m |= (m >> 4);
+ m |= (m >> 8);
+ m |= (m >> 16);
+ m += 1;
+
+ while (1) {
+ int x = rng64() & (m - 1);
+ if (x < n)
+ return x;
+ }
+ }
+
+ void rngseed(uint64_t seed)
+ {
+ rngstate = seed ? seed : 0x3141592653589793;
+ for (int i = 0; i < 5; i++)
+ rng64();
+ }
+
+ template <typename Iter> void shuffle(const Iter &begin, const Iter &end)
+ {
+ std::size_t size = end - begin;
+ for (std::size_t i = 0; i != size; i++) {
+ std::size_t j = i + rng(size - i);
+ if (j > i)
+ std::swap(*(begin + i), *(begin + j));
+ }
+ }
+
+ template <typename T> void shuffle(std::vector<T> &a) { shuffle(a.begin(), a.end()); }
+
+ template <typename T> void sorted_shuffle(std::vector<T> &a)
+ {
+ std::sort(a.begin(), a.end());
+ shuffle(a);
+ }
+};
+
+NEXTPNR_NAMESPACE_END
+
+#endif
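
A minimal sketch of the intended use (not part of this diff): a fixed seed yields an identical stream on every run, and sorted_shuffle() additionally makes the result independent of the input ordering.

    DeterministicRNG rng;
    rng.rngseed(42);                       // same seed -> same sequence every run
    int pick = rng.rng(10);                // uniformly distributed in [0, 10)
    std::vector<int> order = {5, 1, 4, 2};
    rng.sorted_shuffle(order);             // sort first, then shuffle deterministically
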
diff --git a/common/kernel/dynamic_bitarray.h b/common/kernel/dynamic_bitarray.h
new file mode 100644
index 00000000..be41835b
--- /dev/null
+++ b/common/kernel/dynamic_bitarray.h
@@ -0,0 +1,211 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2021 Symbiflow Authors
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef DYNAMIC_BITARRAY_H
+#define DYNAMIC_BITARRAY_H
+
+#include <cstdint>
+#include <limits>
+#include <vector>
+
+#include "log.h"
+#include "nextpnr_assertions.h"
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+// This class implements a simple dynamic bitarray, backed by some resizable
+// random access storage. The default is to use a std::vector<uint8_t>.
+template <typename Storage = std::vector<uint8_t>> class DynamicBitarray
+{
+ public:
+ static_assert(!std::numeric_limits<typename Storage::value_type>::is_signed, "Storage must be unsigned!");
+
+ void fill(bool value)
+ {
+ std::fill(storage.begin(), storage.end(), value ? std::numeric_limits<typename Storage::value_type>::max() : 0);
+ }
+
+ constexpr size_t bits_per_value() const { return std::numeric_limits<typename Storage::value_type>::digits; }
+
+ bool get(size_t bit) const
+ {
+ size_t element_index = bit / bits_per_value();
+ size_t bit_offset = bit % bits_per_value();
+
+ auto element = storage.at(element_index);
+ return (element & (1 << bit_offset)) != 0;
+ }
+
+ void set(size_t bit, bool value)
+ {
+ size_t element_index = bit / bits_per_value();
+ size_t bit_offset = bit % bits_per_value();
+
+ if (value) {
+ storage.at(element_index) |= (1 << bit_offset);
+ } else {
+ storage.at(element_index) &= ~(1 << bit_offset);
+ }
+ }
+
+ void resize(size_t number_bits)
+ {
+ size_t required_storage = (number_bits + bits_per_value() - 1) / bits_per_value();
+ storage.resize(required_storage);
+ }
+
+ size_t size() const { return storage.size() * bits_per_value(); }
+
+ void clear() { return storage.clear(); }
+
+ // Convert IntType to a DynamicBitarray of sufficient width
+ template <typename IntType> static DynamicBitarray<Storage> to_bitarray(const IntType &value)
+ {
+ if (std::numeric_limits<IntType>::is_signed) {
+ if (value < 0) {
+ log_error("Expected position value, got %s\n", std::to_string(value).c_str());
+ }
+ }
+
+ DynamicBitarray<Storage> result;
+ result.resize(std::numeric_limits<IntType>::digits);
+ result.fill(false);
+
+ // Use a 1 of the right type (for shifting)
+ IntType one(1);
+
+ for (size_t i = 0; i < std::numeric_limits<IntType>::digits; ++i) {
+ if ((value & (one << i)) != 0) {
+ result.set(i, true);
+ }
+ }
+
+ return result;
+ }
+
+ // Convert a binary bitstring to a DynamicBitarray of sufficient width
+ //
+ // The string must satisfy the following regex:
+ //
+ // [01]+
+ //
+ // width can either be specified explicitly, or -1 to use a size wide
+ // enough to store the given string.
+ //
+ // If a width is specified but is insufficient for the string, an error
+ // is raised.
+ static DynamicBitarray<Storage> parse_binary_bitstring(int width, const std::string &bits)
+ {
+ NPNR_ASSERT(width == -1 || width > 0);
+
+ DynamicBitarray<Storage> result;
+ // If no width was supplied, use the width from the input data.
+ if (width == -1) {
+ width = bits.size();
+ }
+
+ NPNR_ASSERT(width >= 0);
+ if ((size_t)width < bits.size()) {
+ log_error("String '%s' is wider than specified width %d\n", bits.c_str(), width);
+ }
+ result.resize(width);
+ result.fill(false);
+
+ for (size_t i = 0; i < bits.size(); ++i) {
+ // bits[0] is the MSB!
+ size_t index = width - 1 - i;
+ if (!(bits[i] == '1' || bits[i] == '0')) {
+ log_error("String '%s' is not a valid binary bitstring?\n", bits.c_str());
+ }
+ result.set(index, bits[i] == '1');
+ }
+
+ return result;
+ }
+
+ // Convert a hex bitstring to a DynamicBitarray of sufficient width
+ //
+ // The string must satisfy the following regex:
+ //
+ // [0-9a-fA-F]+
+ //
+ // width can either be specified explicitly, or -1 to use a size wide
+ // enough to store the given string.
+ //
+ // If a width is specified but is insufficient for the string, an error
+ // is raised.
+ static DynamicBitarray<Storage> parse_hex_bitstring(int width, const std::string &bits)
+ {
+ NPNR_ASSERT(width == -1 || width > 0);
+
+ DynamicBitarray<Storage> result;
+ // If no width was supplied, use the width from the input data.
+ if (width == -1) {
+ // Each character is 4 bits!
+ width = bits.size() * 4;
+ }
+
+ NPNR_ASSERT(width >= 0);
+ int rem = width % 4;
+ size_t check_width = width;
+ if (rem != 0) {
+ check_width += (4 - rem);
+ }
+ if (check_width < bits.size() * 4) {
+ log_error("String '%s' is wider than specified width %d (check_width = %zu)\n", bits.c_str(), width,
+ check_width);
+ }
+
+ result.resize(width);
+ result.fill(false);
+
+ size_t index = 0;
+ for (auto nibble_iter = bits.rbegin(); nibble_iter != bits.rend(); ++nibble_iter) {
+ char nibble = *nibble_iter;
+
+ int value;
+ if (nibble >= '0' && nibble <= '9') {
+ value = nibble - '0';
+ } else if (nibble >= 'a' && nibble <= 'f') {
+ value = 10 + (nibble - 'a');
+ } else if (nibble >= 'A' && nibble <= 'F') {
+ value = 10 + (nibble - 'A');
+ } else {
+ log_error("Invalid hex string '%s'?\n", bits.c_str());
+ }
+ NPNR_ASSERT(value >= 0);
+ NPNR_ASSERT(value < 16);
+
+ // Insert nibble into bitarray.
+ for (size_t i = 0; i < 4; ++i) {
+ result.set(index++, (value & (1 << i)) != 0);
+ }
+ }
+
+ return result;
+ }
+
+ private:
+ Storage storage;
+};
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* DYNAMIC_BITARRAY_H */
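
A short sketch of the parsing helpers above (not part of this diff); in all three constructors bit 0 is the least significant bit:

    auto cfg  = DynamicBitarray<>::parse_hex_bitstring(16, "BEEF");    // 16 bits wide
    bool lsb  = cfg.get(0);                                            // true: 0xF & 1
    cfg.set(15, false);                                                // clear the MSB
    auto nib  = DynamicBitarray<>::parse_binary_bitstring(-1, "1010"); // width inferred as 4
    auto word = DynamicBitarray<>::to_bitarray<uint32_t>(0x12345678u); // 32 bits wide
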
diff --git a/common/kernel/embed.cc b/common/kernel/embed.cc
new file mode 100644
index 00000000..70bbc6fb
--- /dev/null
+++ b/common/kernel/embed.cc
@@ -0,0 +1,49 @@
+#if defined(WIN32)
+#include <windows.h>
+#endif
+#include <boost/filesystem.hpp>
+#include <boost/iostreams/device/mapped_file.hpp>
+#include "embed.h"
+#include "nextpnr.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+#if defined(EXTERNAL_CHIPDB_ROOT)
+
+const void *get_chipdb(const std::string &filename)
+{
+ static std::map<std::string, boost::iostreams::mapped_file> files;
+ if (!files.count(filename)) {
+ std::string full_filename = EXTERNAL_CHIPDB_ROOT "/" + filename;
+ if (boost::filesystem::exists(full_filename))
+ files[filename].open(full_filename, boost::iostreams::mapped_file::priv);
+ }
+ if (files.count(filename))
+ return files.at(filename).data();
+ return nullptr;
+}
+
+#elif defined(WIN32)
+
+const void *get_chipdb(const std::string &filename)
+{
+ HRSRC rc = ::FindResource(nullptr, filename.c_str(), RT_RCDATA);
+ HGLOBAL rcData = ::LoadResource(nullptr, rc);
+ return ::LockResource(rcData);
+}
+
+#else
+
+EmbeddedFile *EmbeddedFile::head = nullptr;
+
+const void *get_chipdb(const std::string &filename)
+{
+ for (EmbeddedFile *file = EmbeddedFile::head; file; file = file->next)
+ if (file->filename == filename)
+ return file->content;
+ return nullptr;
+}
+
+#endif
+
+NEXTPNR_NAMESPACE_END
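
All three implementations share the same lookup contract; a hedged sketch of a caller (not part of this diff, the filename is a placeholder rather than a real chip database):

    const void *blob = get_chipdb("example/chipdb-example.bin");
    if (blob == nullptr)
        log_error("failed to load chip database\n");
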
diff --git a/common/kernel/embed.h b/common/kernel/embed.h
new file mode 100644
index 00000000..5f2754f8
--- /dev/null
+++ b/common/kernel/embed.h
@@ -0,0 +1,49 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2020 whitequark <whitequark@whitequark.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef EMBED_H
+#define EMBED_H
+
+#include "nextpnr.h"
+NEXTPNR_NAMESPACE_BEGIN
+
+#if !defined(EXTERNAL_CHIPDB_ROOT) && !defined(WIN32)
+
+struct EmbeddedFile
+{
+ static EmbeddedFile *head;
+
+ std::string filename;
+ const void *content;
+ EmbeddedFile *next = nullptr;
+
+ EmbeddedFile(const std::string &filename, const void *content) : filename(filename), content(content)
+ {
+ next = head;
+ head = this;
+ }
+};
+
+#endif
+
+const void *get_chipdb(const std::string &filename);
+
+NEXTPNR_NAMESPACE_END
+
+#endif // EMBED_H
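
On the default (non-Windows, non-external-chipdb) path, generated chip database translation units register themselves via the EmbeddedFile constructor at static initialisation time. A sketch of that pattern (not part of this diff; symbol and file names are placeholders):

    extern const char chipdb_blob_example[];
    static EmbeddedFile chipdb_file_example("example/chipdb-example.bin", chipdb_blob_example);
    // get_chipdb("example/chipdb-example.bin") now walks EmbeddedFile::head
    // and returns chipdb_blob_example
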
diff --git a/common/kernel/exclusive_state_groups.h b/common/kernel/exclusive_state_groups.h
new file mode 100644
index 00000000..68ce7c4e
--- /dev/null
+++ b/common/kernel/exclusive_state_groups.h
@@ -0,0 +1,154 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2021 The SymbiFlow Authors.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef EXCLUSIVE_STATE_GROUPS_H
+#define EXCLUSIVE_STATE_GROUPS_H
+
+#include <array>
+#include <bitset>
+#include <cstdint>
+#include <limits>
+#include <vector>
+
+#include "archdefs.h"
+#include "bits.h"
+#include "idstring.h"
+#include "nextpnr_assertions.h"
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+// Implementation for exclusive state groups, used to implement generic
+// constraint system.
+template <size_t StateCount, typename StateType = int8_t, typename CountType = uint8_t> struct ExclusiveStateGroup
+{
+ ExclusiveStateGroup() : state(kNoSelected) { count.fill(0); }
+ struct Definition
+ {
+ IdString prefix;
+ IdString default_state;
+ std::vector<IdString> states;
+ };
+
+ static_assert(StateCount < std::numeric_limits<StateType>::max(), "StateType cannot represent StateCount distinct states");
+ static_assert(std::numeric_limits<StateType>::is_signed, "StateType must be signed");
+
+ std::bitset<StateCount> selected_states;
+ StateType state;
+ std::array<CountType, StateCount> count;
+
+ static constexpr StateType kNoSelected = -1;
+ static constexpr StateType kOverConstrained = -2;
+
+ std::pair<bool, IdString> current_state(const Definition &definition) const
+ {
+ if (state <= 0) {
+ return std::make_pair(state == kNoSelected, definition.default_state);
+ } else {
+ NPNR_ASSERT(state <= definition.states.size());
+ return std::make_pair(true, definition.states[state]);
+ }
+ }
+
+ bool check_implies(int32_t next_state) const
+ {
+ // Implies can be satisfied if either that state is
+ // selected, or no state is currently selected.
+ return state == next_state || state == kNoSelected;
+ }
+
+ bool add_implies(int32_t next_state)
+ {
+ NPNR_ASSERT(next_state >= 0 && (size_t)next_state < StateCount);
+
+ // Increment and mark the state as selected.
+ count[next_state] += 1;
+ selected_states[next_state] = true;
+
+ if (state == next_state) {
+ // State was already selected, state group is still satisfied.
+ return true;
+ } else if (selected_states.count() == 1) {
+ // State was not previously selected; it is now the selected state.
+ // State group is satisfied.
+ state = next_state;
+ return true;
+ } else {
+ // State group is now overconstrained.
+ state = kOverConstrained;
+ return false;
+ }
+ };
+
+ void remove_implies(int32_t next_state)
+ {
+ NPNR_ASSERT(next_state >= 0 && (size_t)next_state < StateCount);
+ NPNR_ASSERT(selected_states[next_state]);
+
+ count[next_state] -= 1;
+ NPNR_ASSERT(count[next_state] >= 0);
+
+ // Check if next_state is now unselected.
+ if (count[next_state] == 0) {
+ // next_state is no longer selected
+ selected_states[next_state] = false;
+
+ // Check whether the state group is now unselected or satisfied.
+ auto value = selected_states.to_ulong();
+ auto number_selected = Bits::popcount(value);
+ if (number_selected == 1) {
+ // Group is no longer overconstrained.
+ state = Bits::ctz(value);
+ NPNR_ASSERT(selected_states[state]);
+ } else if (number_selected == 0) {
+ // Group is unselected.
+ state = kNoSelected;
+ } else {
+ state = kOverConstrained;
+ }
+ }
+ }
+
+ template <typename StateRange> bool requires(const StateRange &state_range) const
+ {
+ if (state < 0) {
+ return false;
+ }
+
+ for (const auto required_state : state_range) {
+ if (state == required_state) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ void print_debug(const Context *ctx, IdString object, const Definition &definition) const;
+ void explain_implies(const Context *ctx, IdString object, IdString cell, const Definition &definition, BelId bel,
+ int32_t next_state) const;
+
+ template <typename StateRange>
+ void explain_requires(const Context *ctx, IdString object, IdString cell, const Definition &definition, BelId bel,
+ const StateRange state_range) const;
+};
+
+NEXTPNR_NAMESPACE_END
+
+#endif
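
A minimal sketch of the add/remove protocol above (not part of this diff; state indices are illustrative), e.g. for a group tracking which of several exclusive states a tile has committed to:

    ExclusiveStateGroup<4> group;        // four possible states, none selected yet
    bool ok = group.add_implies(1);      // first request for state 1 -> true, satisfied
    ok = group.add_implies(2);           // conflicting request -> false, overconstrained
    group.remove_implies(2);             // undo the conflict; state 1 is selected again
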
diff --git a/common/kernel/exclusive_state_groups.impl.h b/common/kernel/exclusive_state_groups.impl.h
new file mode 100644
index 00000000..f3ddb5fd
--- /dev/null
+++ b/common/kernel/exclusive_state_groups.impl.h
@@ -0,0 +1,89 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2021 The SymbiFlow Authors.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#pragma once
+
+#include "context.h"
+#include "exclusive_state_groups.h"
+#include "log.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+template <size_t StateCount, typename StateType, typename CountType>
+void ExclusiveStateGroup<StateCount, StateType, CountType>::print_debug(const Context *ctx, IdString object,
+ const Definition &definition) const
+{
+ if (state == kNoSelected) {
+ NPNR_ASSERT(selected_states.count() == 0);
+ log_info("%s.%s is currently unselected\n", object.c_str(ctx), definition.prefix.c_str(ctx));
+ } else if (state >= 0) {
+ log_info("%s.%s = %s, count = %d\n", object.c_str(ctx), definition.prefix.c_str(ctx),
+ definition.states.at(state).c_str(ctx), count[state]);
+ } else {
+ NPNR_ASSERT(state == kOverConstrained);
+ log_info("%s.%s is currently overconstrained, states selected:\n", object.c_str(ctx),
+ definition.prefix.c_str(ctx));
+ for (size_t i = 0; i < definition.states.size(); ++i) {
+ if (selected_states[i]) {
+ log_info(" - %s, count = %d\n", definition.states.at(i).c_str(ctx), count[i]);
+ }
+ }
+ }
+}
+
+template <size_t StateCount, typename StateType, typename CountType>
+void ExclusiveStateGroup<StateCount, StateType, CountType>::explain_implies(const Context *ctx, IdString object,
+ IdString cell, const Definition &definition,
+ BelId bel, int32_t next_state) const
+{
+ if (check_implies(next_state)) {
+ log_info("Placing cell %s at bel %s does not violate %s.%s\n", cell.c_str(ctx), ctx->nameOfBel(bel),
+ object.c_str(ctx), definition.prefix.c_str(ctx));
+ } else {
+ log_info("Placing cell %s at bel %s does violates %s.%s, desired state = %s.\n", cell.c_str(ctx),
+ ctx->nameOfBel(bel), object.c_str(ctx), definition.prefix.c_str(ctx),
+ definition.states.at(next_state).c_str(ctx));
+ print_debug(ctx, object, definition);
+ }
+}
+
+template <size_t StateCount, typename StateType, typename CountType>
+template <typename StateRange>
+void ExclusiveStateGroup<StateCount, StateType, CountType>::explain_requires(const Context *ctx, IdString object,
+ IdString cell,
+ const Definition &definition, BelId bel,
+ const StateRange state_range) const
+{
+ if (requires(state_range)) {
+ log_info("Placing cell %s at bel %s does not violate %s.%s\n", cell.c_str(ctx), ctx->nameOfBel(bel),
+ object.c_str(ctx), definition.prefix.c_str(ctx));
+ } else {
+ log_info("Placing cell %s at bel %s does violate %s.%s, because current state is %s, constraint requires one "
+ "of:\n",
+ cell.c_str(ctx), ctx->nameOfBel(bel), object.c_str(ctx), definition.prefix.c_str(ctx),
+ state != -1 ? definition.states.at(state).c_str(ctx) : "unset");
+
+ for (const auto required_state : state_range) {
+ log_info(" - %s\n", definition.states.at(required_state).c_str(ctx));
+ }
+ print_debug(ctx, object, definition);
+ }
+}
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/handle_error.cc b/common/kernel/handle_error.cc
new file mode 100644
index 00000000..d5542369
--- /dev/null
+++ b/common/kernel/handle_error.cc
@@ -0,0 +1,61 @@
+#ifndef NO_PYTHON
+
+#include <Python.h>
+#include <pybind11/pybind11.h>
+#include "nextpnr.h"
+
+namespace py = pybind11;
+
+NEXTPNR_NAMESPACE_BEGIN
+
+// Parses the value of the active python exception
+// NOTE SHOULD NOT BE CALLED IF NO EXCEPTION
+std::string parse_python_exception()
+{
+ PyObject *type_ptr = NULL, *value_ptr = NULL, *traceback_ptr = NULL;
+ // Fetch the exception info from the Python C API
+ PyErr_Fetch(&type_ptr, &value_ptr, &traceback_ptr);
+
+ // Fallback error
+ std::string ret("Unfetchable Python error");
+ // If the fetch got a type pointer, parse the type into the exception string
+ if (type_ptr != NULL) {
+ py::object obj = py::reinterpret_borrow<py::object>(type_ptr);
+ // If a valid string extraction is available, use it
+ // otherwise use fallback
+ if (py::isinstance<py::str>(obj))
+ ret = obj.cast<std::string>();
+ else
+ ret = "Unknown exception type";
+ }
+ // Do the same for the exception value (the stringification of the
+ // exception)
+ if (value_ptr != NULL) {
+ py::object obj = py::reinterpret_borrow<py::object>(value_ptr);
+ if (py::isinstance<py::str>(obj))
+ ret += ": " + obj.cast<std::string>();
+ else
+ ret += std::string(": Unparseable Python error: ");
+ }
+ // Parse lines from the traceback using the Python traceback module
+ if (traceback_ptr != NULL) {
+ py::handle h_tb(traceback_ptr);
+ // Load the traceback module and the format_tb function
+ py::object tb(py::module::import("traceback"));
+ py::object fmt_tb(tb.attr("format_tb"));
+ // Call format_tb to get a list of traceback strings
+ py::object tb_list(fmt_tb(h_tb));
+ // Join the traceback strings into a single string
+ py::object tb_str(py::str("\n") + tb_list);
+ // Extract the string, check the extraction, and fall back if necessary
+ if (py::isinstance<py::str>(tb_str))
+ ret += ": " + tb_str.cast<std::string>();
+ else
+ ret += std::string(": Unparseable Python traceback");
+ }
+ return ret;
+}
+
+NEXTPNR_NAMESPACE_END
+
+#endif // NO_PYTHON
\ No newline at end of file
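
A hedged sketch of the calling pattern (not part of this diff): after a raw CPython call fails, the error indicator is still set and parse_python_exception() turns it into a printable message. The executed snippet and the use of log_nonfatal_error() are illustrative, and the interpreter is assumed to be initialised.

    PyObject *globals = PyDict_New();
    PyObject *res = PyRun_String("1/0", Py_file_input, globals, globals);
    if (res == nullptr)
        log_nonfatal_error("Python error: %s\n", parse_python_exception().c_str());
    Py_XDECREF(res);
    Py_DECREF(globals);
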
diff --git a/common/kernel/hashlib.h b/common/kernel/hashlib.h
new file mode 100644
index 00000000..2f7357e2
--- /dev/null
+++ b/common/kernel/hashlib.h
@@ -0,0 +1,1210 @@
+// This is free and unencumbered software released into the public domain.
+//
+// Anyone is free to copy, modify, publish, use, compile, sell, or
+// distribute this software, either in source code form or as a compiled
+// binary, for any purpose, commercial or non-commercial, and by any
+// means.
+
+// -------------------------------------------------------
+// Written by Claire Xen <claire@clairexen.net> in 2014
+// -------------------------------------------------------
+
+#ifndef HASHLIB_H
+#define HASHLIB_H
+
+#include <algorithm>
+#include <array>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "nextpnr_assertions.h"
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+const int hashtable_size_trigger = 2;
+const int hashtable_size_factor = 3;
+
+// Cantor pairing function for two non-negative integers
+// https://en.wikipedia.org/wiki/Pairing_function
+inline unsigned int mkhash(unsigned int a, unsigned int b) { return (a * a + 3 * a + 2 * a * b + b + b * b) / 2; }
+
+// traditionally 5381 is used as starting value for the djb2 hash
+const unsigned int mkhash_init = 5381;
+
+// The ADD version of DJB2
+// (use this version for cache locality in b)
+inline unsigned int mkhash_add(unsigned int a, unsigned int b) { return ((a << 5) + a) + b; }
+
+inline unsigned int mkhash_xorshift(unsigned int a)
+{
+ if (sizeof(a) == 4) {
+ a ^= a << 13;
+ a ^= a >> 17;
+ a ^= a << 5;
+ } else if (sizeof(a) == 8) {
+ a ^= a << 13;
+ a ^= a >> 7;
+ a ^= a << 17;
+ } else
+ NPNR_ASSERT_FALSE("mkhash_xorshift() only implemented for 32 bit and 64 bit ints");
+ return a;
+}
+
+template <typename T> struct hash_ops
+{
+ static inline bool cmp(const T &a, const T &b) { return a == b; }
+ static inline unsigned int hash(const T &a) { return a.hash(); }
+};
+
+struct hash_int_ops
+{
+ template <typename T> static inline bool cmp(T a, T b) { return a == b; }
+};
+
+template <> struct hash_ops<bool> : hash_int_ops
+{
+ static inline unsigned int hash(bool a) { return a ? 1 : 0; }
+};
+template <> struct hash_ops<int32_t> : hash_int_ops
+{
+ static inline unsigned int hash(int32_t a) { return a; }
+};
+template <> struct hash_ops<int64_t> : hash_int_ops
+{
+ static inline unsigned int hash(int64_t a) { return mkhash((unsigned int)(a), (unsigned int)(a >> 32)); }
+};
+
+template <> struct hash_ops<uint32_t> : hash_int_ops
+{
+ static inline unsigned int hash(uint32_t a) { return a; }
+};
+template <> struct hash_ops<uint64_t> : hash_int_ops
+{
+ static inline unsigned int hash(uint64_t a) { return mkhash((unsigned int)(a), (unsigned int)(a >> 32)); }
+};
+
+template <> struct hash_ops<std::string>
+{
+ static inline bool cmp(const std::string &a, const std::string &b) { return a == b; }
+ static inline unsigned int hash(const std::string &a)
+ {
+ unsigned int v = 0;
+ for (auto c : a)
+ v = mkhash(v, c);
+ return v;
+ }
+};
+
+template <typename P, typename Q> struct hash_ops<std::pair<P, Q>>
+{
+ static inline bool cmp(std::pair<P, Q> a, std::pair<P, Q> b) { return a == b; }
+ static inline unsigned int hash(std::pair<P, Q> a)
+ {
+ return mkhash(hash_ops<P>::hash(a.first), hash_ops<Q>::hash(a.second));
+ }
+};
+
+template <typename... T> struct hash_ops<std::tuple<T...>>
+{
+ static inline bool cmp(std::tuple<T...> a, std::tuple<T...> b) { return a == b; }
+ template <size_t I = 0>
+ static inline typename std::enable_if<I == sizeof...(T), unsigned int>::type hash(std::tuple<T...>)
+ {
+ return mkhash_init;
+ }
+ template <size_t I = 0>
+ static inline typename std::enable_if<I != sizeof...(T), unsigned int>::type hash(std::tuple<T...> a)
+ {
+ typedef hash_ops<typename std::tuple_element<I, std::tuple<T...>>::type> element_ops_t;
+ return mkhash(hash<I + 1>(a), element_ops_t::hash(std::get<I>(a)));
+ }
+};
+
+template <typename T> struct hash_ops<std::vector<T>>
+{
+ static inline bool cmp(std::vector<T> a, std::vector<T> b) { return a == b; }
+ static inline unsigned int hash(std::vector<T> a)
+ {
+ unsigned int h = mkhash_init;
+ for (auto k : a)
+ h = mkhash(h, hash_ops<T>::hash(k));
+ return h;
+ }
+};
+
+template <typename T, size_t N> struct hash_ops<std::array<T, N>>
+{
+ static inline bool cmp(std::array<T, N> a, std::array<T, N> b) { return a == b; }
+ static inline unsigned int hash(std::array<T, N> a)
+ {
+ unsigned int h = mkhash_init;
+ for (auto k : a)
+ h = mkhash(h, hash_ops<T>::hash(k));
+ return h;
+ }
+};
+
+struct hash_cstr_ops
+{
+ static inline bool cmp(const char *a, const char *b)
+ {
+ for (int i = 0; a[i] || b[i]; i++)
+ if (a[i] != b[i])
+ return false;
+ return true;
+ }
+ static inline unsigned int hash(const char *a)
+ {
+ unsigned int hash = mkhash_init;
+ while (*a)
+ hash = mkhash(hash, *(a++));
+ return hash;
+ }
+};
+
+struct hash_ptr_ops
+{
+ static inline bool cmp(const void *a, const void *b) { return a == b; }
+ static inline unsigned int hash(const void *a) { return (uintptr_t)a; }
+};
+
+struct hash_obj_ops
+{
+ static inline bool cmp(const void *a, const void *b) { return a == b; }
+ template <typename T> static inline unsigned int hash(const T *a) { return a ? a->hash() : 0; }
+};
+
+template <typename T> inline unsigned int mkhash(const T &v) { return hash_ops<T>().hash(v); }
+
+inline int hashtable_size(int min_size)
+{
+ static std::vector<int> zero_and_some_primes = {
+ 0, 23, 29, 37, 47, 59, 79, 101, 127, 163,
+ 211, 269, 337, 431, 541, 677, 853, 1069, 1361, 1709,
+ 2137, 2677, 3347, 4201, 5261, 6577, 8231, 10289, 12889, 16127,
+ 20161, 25219, 31531, 39419, 49277, 61603, 77017, 96281, 120371, 150473,
+ 188107, 235159, 293957, 367453, 459317, 574157, 717697, 897133, 1121423, 1401791,
+ 1752239, 2190299, 2737937, 3422429, 4278037, 5347553, 6684443, 8355563, 10444457, 13055587,
+ 16319519, 20399411, 25499291, 31874149, 39842687, 49803361, 62254207, 77817767, 97272239, 121590311,
+ 151987889, 189984863, 237481091, 296851369, 371064217};
+
+ for (auto p : zero_and_some_primes)
+ if (p >= min_size)
+ return p;
+
+ if (sizeof(int) == 4)
+ throw std::length_error("hash table exceeded maximum size. use a ILP64 abi for larger tables.");
+
+ for (auto p : zero_and_some_primes)
+ if (100129 * p > min_size)
+ return 100129 * p;
+
+ throw std::length_error("hash table exceeded maximum size.");
+}
+
+template <typename K, typename T, typename OPS = hash_ops<K>> class dict;
+template <typename K, int offset = 0, typename OPS = hash_ops<K>> class idict;
+template <typename K, typename OPS = hash_ops<K>> class pool;
+template <typename K, typename OPS = hash_ops<K>> class mfp;
+
+template <typename K, typename T, typename OPS> class dict
+{
+ struct entry_t
+ {
+ std::pair<K, T> udata;
+ int next;
+
+ entry_t() {}
+ entry_t(const std::pair<K, T> &udata, int next) : udata(udata), next(next) {}
+ entry_t(std::pair<K, T> &&udata, int next) : udata(std::move(udata)), next(next) {}
+ bool operator<(const entry_t &other) const { return udata.first < other.udata.first; }
+ };
+
+ std::vector<int> hashtable;
+ std::vector<entry_t> entries;
+ OPS ops;
+
+#ifdef NDEBUG
+ static inline void do_assert(bool) {}
+#else
+ static inline void do_assert(bool cond) { NPNR_ASSERT(cond); }
+#endif
+
+ int do_hash(const K &key) const
+ {
+ unsigned int hash = 0;
+ if (!hashtable.empty())
+ hash = ops.hash(key) % (unsigned int)(hashtable.size());
+ return hash;
+ }
+
+ void do_rehash()
+ {
+ hashtable.clear();
+ hashtable.resize(hashtable_size(entries.capacity() * hashtable_size_factor), -1);
+
+ for (int i = 0; i < int(entries.size()); i++) {
+ do_assert(-1 <= entries[i].next && entries[i].next < int(entries.size()));
+ int hash = do_hash(entries[i].udata.first);
+ entries[i].next = hashtable[hash];
+ hashtable[hash] = i;
+ }
+ }
+
+ int do_erase(int index, int hash)
+ {
+ do_assert(index < int(entries.size()));
+ if (hashtable.empty() || index < 0)
+ return 0;
+
+ int k = hashtable[hash];
+ do_assert(0 <= k && k < int(entries.size()));
+
+ if (k == index) {
+ hashtable[hash] = entries[index].next;
+ } else {
+ while (entries[k].next != index) {
+ k = entries[k].next;
+ do_assert(0 <= k && k < int(entries.size()));
+ }
+ entries[k].next = entries[index].next;
+ }
+
+ int back_idx = entries.size() - 1;
+
+ if (index != back_idx) {
+ int back_hash = do_hash(entries[back_idx].udata.first);
+
+ k = hashtable[back_hash];
+ do_assert(0 <= k && k < int(entries.size()));
+
+ if (k == back_idx) {
+ hashtable[back_hash] = index;
+ } else {
+ while (entries[k].next != back_idx) {
+ k = entries[k].next;
+ do_assert(0 <= k && k < int(entries.size()));
+ }
+ entries[k].next = index;
+ }
+
+ entries[index] = std::move(entries[back_idx]);
+ }
+
+ entries.pop_back();
+
+ if (entries.empty())
+ hashtable.clear();
+
+ return 1;
+ }
+
+ int do_lookup(const K &key, int &hash) const
+ {
+ if (hashtable.empty())
+ return -1;
+
+ if (entries.size() * hashtable_size_trigger > hashtable.size()) {
+ ((dict *)this)->do_rehash();
+ hash = do_hash(key);
+ }
+
+ int index = hashtable[hash];
+
+ while (index >= 0 && !ops.cmp(entries[index].udata.first, key)) {
+ index = entries[index].next;
+ do_assert(-1 <= index && index < int(entries.size()));
+ }
+
+ return index;
+ }
+
+ int do_insert(const K &key, int &hash)
+ {
+ if (hashtable.empty()) {
+ entries.emplace_back(std::pair<K, T>(key, T()), -1);
+ do_rehash();
+ hash = do_hash(key);
+ } else {
+ entries.emplace_back(std::pair<K, T>(key, T()), hashtable[hash]);
+ hashtable[hash] = entries.size() - 1;
+ }
+ return entries.size() - 1;
+ }
+
+ int do_insert(const std::pair<K, T> &value, int &hash)
+ {
+ if (hashtable.empty()) {
+ entries.emplace_back(value, -1);
+ do_rehash();
+ hash = do_hash(value.first);
+ } else {
+ entries.emplace_back(value, hashtable[hash]);
+ hashtable[hash] = entries.size() - 1;
+ }
+ return entries.size() - 1;
+ }
+
+ int do_insert(std::pair<K, T> &&rvalue, int &hash)
+ {
+ if (hashtable.empty()) {
+ auto key = rvalue.first;
+ entries.emplace_back(std::forward<std::pair<K, T>>(rvalue), -1);
+ do_rehash();
+ hash = do_hash(key);
+ } else {
+ entries.emplace_back(std::forward<std::pair<K, T>>(rvalue), hashtable[hash]);
+ hashtable[hash] = entries.size() - 1;
+ }
+ return entries.size() - 1;
+ }
+
+ public:
+ using key_type = K;
+ using mapped_type = T;
+ using value_type = std::pair<K, T>;
+
+ class const_iterator : public std::iterator<std::forward_iterator_tag, std::pair<K, T>>
+ {
+ friend class dict;
+
+ protected:
+ const dict *ptr;
+ int index;
+ const_iterator(const dict *ptr, int index) : ptr(ptr), index(index) {}
+
+ public:
+ const_iterator() {}
+ const_iterator operator++()
+ {
+ index--;
+ return *this;
+ }
+ const_iterator operator+=(int amt)
+ {
+ index -= amt;
+ return *this;
+ }
+ bool operator<(const const_iterator &other) const { return index > other.index; }
+ bool operator==(const const_iterator &other) const { return index == other.index; }
+ bool operator!=(const const_iterator &other) const { return index != other.index; }
+ const std::pair<K, T> &operator*() const { return ptr->entries[index].udata; }
+ const std::pair<K, T> *operator->() const { return &ptr->entries[index].udata; }
+ };
+
+ class iterator : public std::iterator<std::forward_iterator_tag, std::pair<K, T>>
+ {
+ friend class dict;
+
+ protected:
+ dict *ptr;
+ int index;
+ iterator(dict *ptr, int index) : ptr(ptr), index(index) {}
+
+ public:
+ iterator() {}
+ iterator operator++()
+ {
+ index--;
+ return *this;
+ }
+ iterator operator+=(int amt)
+ {
+ index -= amt;
+ return *this;
+ }
+ bool operator<(const iterator &other) const { return index > other.index; }
+ bool operator==(const iterator &other) const { return index == other.index; }
+ bool operator!=(const iterator &other) const { return index != other.index; }
+ std::pair<K, T> &operator*() { return ptr->entries[index].udata; }
+ std::pair<K, T> *operator->() { return &ptr->entries[index].udata; }
+ const std::pair<K, T> &operator*() const { return ptr->entries[index].udata; }
+ const std::pair<K, T> *operator->() const { return &ptr->entries[index].udata; }
+ operator const_iterator() const { return const_iterator(ptr, index); }
+ };
+
+ dict() {}
+
+ dict(const dict &other)
+ {
+ entries = other.entries;
+ do_rehash();
+ }
+
+ dict(dict &&other) { swap(other); }
+
+ dict &operator=(const dict &other)
+ {
+ entries = other.entries;
+ do_rehash();
+ return *this;
+ }
+
+ dict &operator=(dict &&other)
+ {
+ clear();
+ swap(other);
+ return *this;
+ }
+
+ dict(const std::initializer_list<std::pair<K, T>> &list)
+ {
+ for (auto &it : list)
+ insert(it);
+ }
+
+ template <class InputIterator> dict(InputIterator first, InputIterator last) { insert(first, last); }
+
+ template <class InputIterator> void insert(InputIterator first, InputIterator last)
+ {
+ for (; first != last; ++first)
+ insert(*first);
+ }
+
+ std::pair<iterator, bool> insert(const K &key)
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ if (i >= 0)
+ return std::pair<iterator, bool>(iterator(this, i), false);
+ i = do_insert(key, hash);
+ return std::pair<iterator, bool>(iterator(this, i), true);
+ }
+
+ std::pair<iterator, bool> insert(const std::pair<K, T> &value)
+ {
+ int hash = do_hash(value.first);
+ int i = do_lookup(value.first, hash);
+ if (i >= 0)
+ return std::pair<iterator, bool>(iterator(this, i), false);
+ i = do_insert(value, hash);
+ return std::pair<iterator, bool>(iterator(this, i), true);
+ }
+
+ std::pair<iterator, bool> insert(std::pair<K, T> &&rvalue)
+ {
+ int hash = do_hash(rvalue.first);
+ int i = do_lookup(rvalue.first, hash);
+ if (i >= 0)
+ return std::pair<iterator, bool>(iterator(this, i), false);
+ i = do_insert(std::forward<std::pair<K, T>>(rvalue), hash);
+ return std::pair<iterator, bool>(iterator(this, i), true);
+ }
+
+ std::pair<iterator, bool> emplace(K const &key, T const &value)
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ if (i >= 0)
+ return std::pair<iterator, bool>(iterator(this, i), false);
+ i = do_insert(std::make_pair(key, value), hash);
+ return std::pair<iterator, bool>(iterator(this, i), true);
+ }
+
+ std::pair<iterator, bool> emplace(K const &key, T &&rvalue)
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ if (i >= 0)
+ return std::pair<iterator, bool>(iterator(this, i), false);
+ i = do_insert(std::make_pair(key, std::forward<T>(rvalue)), hash);
+ return std::pair<iterator, bool>(iterator(this, i), true);
+ }
+
+ std::pair<iterator, bool> emplace(K &&rkey, T const &value)
+ {
+ int hash = do_hash(rkey);
+ int i = do_lookup(rkey, hash);
+ if (i >= 0)
+ return std::pair<iterator, bool>(iterator(this, i), false);
+ i = do_insert(std::make_pair(std::forward<K>(rkey), value), hash);
+ return std::pair<iterator, bool>(iterator(this, i), true);
+ }
+
+ std::pair<iterator, bool> emplace(K &&rkey, T &&rvalue)
+ {
+ int hash = do_hash(rkey);
+ int i = do_lookup(rkey, hash);
+ if (i >= 0)
+ return std::pair<iterator, bool>(iterator(this, i), false);
+ i = do_insert(std::make_pair(std::forward<K>(rkey), std::forward<T>(rvalue)), hash);
+ return std::pair<iterator, bool>(iterator(this, i), true);
+ }
+
+ int erase(const K &key)
+ {
+ int hash = do_hash(key);
+ int index = do_lookup(key, hash);
+ return do_erase(index, hash);
+ }
+
+ iterator erase(iterator it)
+ {
+ int hash = do_hash(it->first);
+ do_erase(it.index, hash);
+ return ++it;
+ }
+
+ int count(const K &key) const
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ return i < 0 ? 0 : 1;
+ }
+
+ int count(const K &key, const_iterator it) const
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ return i < 0 || i > it.index ? 0 : 1;
+ }
+
+ iterator find(const K &key)
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ if (i < 0)
+ return end();
+ return iterator(this, i);
+ }
+
+ const_iterator find(const K &key) const
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ if (i < 0)
+ return end();
+ return const_iterator(this, i);
+ }
+
+ T &at(const K &key)
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ if (i < 0)
+ throw std::out_of_range("dict::at()");
+ return entries[i].udata.second;
+ }
+
+ const T &at(const K &key) const
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ if (i < 0)
+ throw std::out_of_range("dict::at()");
+ return entries[i].udata.second;
+ }
+
+ const T &at(const K &key, const T &defval) const
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ if (i < 0)
+ return defval;
+ return entries[i].udata.second;
+ }
+
+ T &operator[](const K &key)
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ if (i < 0)
+ i = do_insert(std::pair<K, T>(key, T()), hash);
+ return entries[i].udata.second;
+ }
+
+ template <typename Compare = std::less<K>> void sort(Compare comp = Compare())
+ {
+ std::sort(entries.begin(), entries.end(),
+ [comp](const entry_t &a, const entry_t &b) { return comp(b.udata.first, a.udata.first); });
+ do_rehash();
+ }
+
+ void swap(dict &other)
+ {
+ hashtable.swap(other.hashtable);
+ entries.swap(other.entries);
+ }
+
+ bool operator==(const dict &other) const
+ {
+ if (size() != other.size())
+ return false;
+ for (auto &it : entries) {
+ auto oit = other.find(it.udata.first);
+ if (oit == other.end() || !(oit->second == it.udata.second))
+ return false;
+ }
+ return true;
+ }
+
+ bool operator!=(const dict &other) const { return !operator==(other); }
+
+ unsigned int hash() const
+ {
+ unsigned int h = mkhash_init;
+ for (auto &entry : entries) {
+ h ^= hash_ops<K>::hash(entry.udata.first);
+ h ^= hash_ops<T>::hash(entry.udata.second);
+ }
+ return h;
+ }
+
+ void reserve(size_t n) { entries.reserve(n); }
+ size_t size() const { return entries.size(); }
+ bool empty() const { return entries.empty(); }
+ void clear()
+ {
+ hashtable.clear();
+ entries.clear();
+ }
+
+ iterator begin() { return iterator(this, int(entries.size()) - 1); }
+ iterator element(int n) { return iterator(this, int(entries.size()) - 1 - n); }
+ iterator end() { return iterator(nullptr, -1); }
+
+ const_iterator begin() const { return const_iterator(this, int(entries.size()) - 1); }
+ const_iterator element(int n) const { return const_iterator(this, int(entries.size()) - 1 - n); }
+ const_iterator end() const { return const_iterator(nullptr, -1); }
+};
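
A minimal sketch of basic dict usage (not part of this diff; std::string keys for brevity, IdString is the usual key type in nextpnr, and log_info comes from log.h):

    dict<std::string, int> counts;
    counts["lut"] += 1;                 // operator[] default-constructs missing values
    counts.emplace("dff", 2);
    auto it = counts.find("lut");
    if (it != counts.end())
        log_info("%s: %d\n", it->first.c_str(), it->second);
    counts.sort();                      // deterministic iteration order for reproducible logs
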
+
+template <typename K, typename OPS> class pool
+{
+ template <typename, int, typename> friend class idict;
+
+ protected:
+ struct entry_t
+ {
+ K udata;
+ int next;
+
+ entry_t() {}
+ entry_t(const K &udata, int next) : udata(udata), next(next) {}
+ entry_t(K &&udata, int next) : udata(std::move(udata)), next(next) {}
+ };
+
+ std::vector<int> hashtable;
+ std::vector<entry_t> entries;
+ OPS ops;
+
+#ifdef NDEBUG
+ static inline void do_assert(bool) {}
+#else
+ static inline void do_assert(bool cond) { NPNR_ASSERT(cond); }
+#endif
+
+ int do_hash(const K &key) const
+ {
+ unsigned int hash = 0;
+ if (!hashtable.empty())
+ hash = ops.hash(key) % (unsigned int)(hashtable.size());
+ return hash;
+ }
+
+ void do_rehash()
+ {
+ hashtable.clear();
+ hashtable.resize(hashtable_size(entries.capacity() * hashtable_size_factor), -1);
+
+ for (int i = 0; i < int(entries.size()); i++) {
+ do_assert(-1 <= entries[i].next && entries[i].next < int(entries.size()));
+ int hash = do_hash(entries[i].udata);
+ entries[i].next = hashtable[hash];
+ hashtable[hash] = i;
+ }
+ }
+
+ int do_erase(int index, int hash)
+ {
+ do_assert(index < int(entries.size()));
+ if (hashtable.empty() || index < 0)
+ return 0;
+
+ int k = hashtable[hash];
+ if (k == index) {
+ hashtable[hash] = entries[index].next;
+ } else {
+ while (entries[k].next != index) {
+ k = entries[k].next;
+ do_assert(0 <= k && k < int(entries.size()));
+ }
+ entries[k].next = entries[index].next;
+ }
+
+ int back_idx = entries.size() - 1;
+
+ if (index != back_idx) {
+ int back_hash = do_hash(entries[back_idx].udata);
+
+ k = hashtable[back_hash];
+ if (k == back_idx) {
+ hashtable[back_hash] = index;
+ } else {
+ while (entries[k].next != back_idx) {
+ k = entries[k].next;
+ do_assert(0 <= k && k < int(entries.size()));
+ }
+ entries[k].next = index;
+ }
+
+ entries[index] = std::move(entries[back_idx]);
+ }
+
+ entries.pop_back();
+
+ if (entries.empty())
+ hashtable.clear();
+
+ return 1;
+ }
+
+ int do_lookup(const K &key, int &hash) const
+ {
+ if (hashtable.empty())
+ return -1;
+
+ if (entries.size() * hashtable_size_trigger > hashtable.size()) {
+ ((pool *)this)->do_rehash();
+ hash = do_hash(key);
+ }
+
+ int index = hashtable[hash];
+
+ while (index >= 0 && !ops.cmp(entries[index].udata, key)) {
+ index = entries[index].next;
+ do_assert(-1 <= index && index < int(entries.size()));
+ }
+
+ return index;
+ }
+
+ int do_insert(const K &value, int &hash)
+ {
+ if (hashtable.empty()) {
+ entries.emplace_back(value, -1);
+ do_rehash();
+ hash = do_hash(value);
+ } else {
+ entries.emplace_back(value, hashtable[hash]);
+ hashtable[hash] = entries.size() - 1;
+ }
+ return entries.size() - 1;
+ }
+
+ int do_insert(K &&rvalue, int &hash)
+ {
+ if (hashtable.empty()) {
+ entries.emplace_back(std::forward<K>(rvalue), -1);
+ do_rehash();
+ hash = do_hash(rvalue);
+ } else {
+ entries.emplace_back(std::forward<K>(rvalue), hashtable[hash]);
+ hashtable[hash] = entries.size() - 1;
+ }
+ return entries.size() - 1;
+ }
+
+ public:
+ class const_iterator : public std::iterator<std::forward_iterator_tag, K>
+ {
+ friend class pool;
+
+ protected:
+ const pool *ptr;
+ int index;
+ const_iterator(const pool *ptr, int index) : ptr(ptr), index(index) {}
+
+ public:
+ const_iterator() {}
+ const_iterator operator++()
+ {
+ index--;
+ return *this;
+ }
+ bool operator==(const const_iterator &other) const { return index == other.index; }
+ bool operator!=(const const_iterator &other) const { return index != other.index; }
+ const K &operator*() const { return ptr->entries[index].udata; }
+ const K *operator->() const { return &ptr->entries[index].udata; }
+ };
+
+ class iterator : public std::iterator<std::forward_iterator_tag, K>
+ {
+ friend class pool;
+
+ protected:
+ pool *ptr;
+ int index;
+ iterator(pool *ptr, int index) : ptr(ptr), index(index) {}
+
+ public:
+ iterator() {}
+ iterator operator++()
+ {
+ index--;
+ return *this;
+ }
+ bool operator==(const iterator &other) const { return index == other.index; }
+ bool operator!=(const iterator &other) const { return index != other.index; }
+ K &operator*() { return ptr->entries[index].udata; }
+ K *operator->() { return &ptr->entries[index].udata; }
+ const K &operator*() const { return ptr->entries[index].udata; }
+ const K *operator->() const { return &ptr->entries[index].udata; }
+ operator const_iterator() const { return const_iterator(ptr, index); }
+ };
+
+ pool() {}
+
+ pool(const pool &other)
+ {
+ entries = other.entries;
+ do_rehash();
+ }
+
+ pool(pool &&other) { swap(other); }
+
+ pool &operator=(const pool &other)
+ {
+ entries = other.entries;
+ do_rehash();
+ return *this;
+ }
+
+ pool &operator=(pool &&other)
+ {
+ clear();
+ swap(other);
+ return *this;
+ }
+
+ pool(const std::initializer_list<K> &list)
+ {
+ for (auto &it : list)
+ insert(it);
+ }
+
+ template <class InputIterator> pool(InputIterator first, InputIterator last) { insert(first, last); }
+
+ template <class InputIterator> void insert(InputIterator first, InputIterator last)
+ {
+ for (; first != last; ++first)
+ insert(*first);
+ }
+
+ std::pair<iterator, bool> insert(const K &value)
+ {
+ int hash = do_hash(value);
+ int i = do_lookup(value, hash);
+ if (i >= 0)
+ return std::pair<iterator, bool>(iterator(this, i), false);
+ i = do_insert(value, hash);
+ return std::pair<iterator, bool>(iterator(this, i), true);
+ }
+
+ std::pair<iterator, bool> insert(K &&rvalue)
+ {
+ int hash = do_hash(rvalue);
+ int i = do_lookup(rvalue, hash);
+ if (i >= 0)
+ return std::pair<iterator, bool>(iterator(this, i), false);
+ i = do_insert(std::forward<K>(rvalue), hash);
+ return std::pair<iterator, bool>(iterator(this, i), true);
+ }
+
+ template <typename... Args> std::pair<iterator, bool> emplace(Args &&...args)
+ {
+ return insert(K(std::forward<Args>(args)...));
+ }
+
+ int erase(const K &key)
+ {
+ int hash = do_hash(key);
+ int index = do_lookup(key, hash);
+ return do_erase(index, hash);
+ }
+
+ iterator erase(iterator it)
+ {
+ int hash = do_hash(*it);
+ do_erase(it.index, hash);
+ return ++it;
+ }
+
+ int count(const K &key) const
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ return i < 0 ? 0 : 1;
+ }
+
+ int count(const K &key, const_iterator it) const
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ return i < 0 || i > it.index ? 0 : 1;
+ }
+
+ iterator find(const K &key)
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ if (i < 0)
+ return end();
+ return iterator(this, i);
+ }
+
+ const_iterator find(const K &key) const
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ if (i < 0)
+ return end();
+ return const_iterator(this, i);
+ }
+
+ bool operator[](const K &key)
+ {
+ int hash = do_hash(key);
+ int i = do_lookup(key, hash);
+ return i >= 0;
+ }
+
+ template <typename Compare = std::less<K>> void sort(Compare comp = Compare())
+ {
+ std::sort(entries.begin(), entries.end(),
+ [comp](const entry_t &a, const entry_t &b) { return comp(b.udata, a.udata); });
+ do_rehash();
+ }
+
+ K pop()
+ {
+ iterator it = begin();
+ K ret = *it;
+ erase(it);
+ return ret;
+ }
+
+ void swap(pool &other)
+ {
+ hashtable.swap(other.hashtable);
+ entries.swap(other.entries);
+ }
+
+ bool operator==(const pool &other) const
+ {
+ if (size() != other.size())
+ return false;
+ for (auto &it : entries)
+ if (!other.count(it.udata))
+ return false;
+ return true;
+ }
+
+ bool operator!=(const pool &other) const { return !operator==(other); }
+
+ unsigned int hash() const
+ {
+ unsigned int hashval = mkhash_init;
+ for (auto &it : entries)
+ hashval ^= ops.hash(it.udata);
+ return hashval;
+ }
+
+ void reserve(size_t n) { entries.reserve(n); }
+ size_t size() const { return entries.size(); }
+ bool empty() const { return entries.empty(); }
+ void clear()
+ {
+ hashtable.clear();
+ entries.clear();
+ }
+
+ iterator begin() { return iterator(this, int(entries.size()) - 1); }
+ iterator element(int n) { return iterator(this, int(entries.size()) - 1 - n); }
+ iterator end() { return iterator(nullptr, -1); }
+
+ const_iterator begin() const { return const_iterator(this, int(entries.size()) - 1); }
+ const_iterator element(int n) const { return const_iterator(this, int(entries.size()) - 1 - n); }
+ const_iterator end() const { return const_iterator(nullptr, -1); }
+};
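+
+// A minimal usage sketch (illustrative, not normative; the OPS parameter defaults to
+// hash_ops<K> as declared earlier in this header, and process() is a hypothetical
+// caller-side function):
+//
+//   pool<std::string> seen;
+//   seen.insert("alpha");                  // returns {iterator, inserted}
+//   if (seen.count("beta"))
+//       seen.erase("beta");
+//   for (const std::string &s : seen)      // iterates from the most recent live entry
+//       process(s);
+//
+// sort() uses a reversed comparator because iteration walks `entries` from back to
+// front, so the reversal presents ascending order to callers.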
+
+template <typename K, int offset, typename OPS> class idict
+{
+ pool<K, OPS> database;
+
+ public:
+ class const_iterator : public std::iterator<std::forward_iterator_tag, K>
+ {
+ friend class idict;
+
+ protected:
+ const idict &container;
+ int index;
+ const_iterator(const idict &container, int index) : container(container), index(index) {}
+
+ public:
+ const_iterator() {}
+ const_iterator operator++()
+ {
+ index++;
+ return *this;
+ }
+ bool operator==(const const_iterator &other) const { return index == other.index; }
+ bool operator!=(const const_iterator &other) const { return index != other.index; }
+ const K &operator*() const { return container[index]; }
+ const K *operator->() const { return &container[index]; }
+ };
+
+ int operator()(const K &key)
+ {
+ int hash = database.do_hash(key);
+ int i = database.do_lookup(key, hash);
+ if (i < 0)
+ i = database.do_insert(key, hash);
+ return i + offset;
+ }
+
+ int at(const K &key) const
+ {
+ int hash = database.do_hash(key);
+ int i = database.do_lookup(key, hash);
+ if (i < 0)
+ throw std::out_of_range("idict::at()");
+ return i + offset;
+ }
+
+ int at(const K &key, int defval) const
+ {
+ int hash = database.do_hash(key);
+ int i = database.do_lookup(key, hash);
+ if (i < 0)
+ return defval;
+ return i + offset;
+ }
+
+ int count(const K &key) const
+ {
+ int hash = database.do_hash(key);
+ int i = database.do_lookup(key, hash);
+ return i < 0 ? 0 : 1;
+ }
+
+ void expect(const K &key, int i)
+ {
+ int j = (*this)(key);
+ if (i != j)
+ throw std::out_of_range("idict::expect()");
+ }
+
+ const K &operator[](int index) const { return database.entries.at(index - offset).udata; }
+
+ void swap(idict &other) { database.swap(other.database); }
+
+ void reserve(size_t n) { database.reserve(n); }
+ size_t size() const { return database.size(); }
+ bool empty() const { return database.empty(); }
+ void clear() { database.clear(); }
+
+ const_iterator begin() const { return const_iterator(*this, offset); }
+ const_iterator element(int n) const { return const_iterator(*this, n); }
+ const_iterator end() const { return const_iterator(*this, offset + size()); }
+};
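+
+// A minimal usage sketch (illustrative; the default template arguments for `offset`
+// and `OPS` come from the forward declarations earlier in this header):
+//
+//   idict<std::string> names;
+//   int a = names("foo");             // inserts on first use, returns 0 + offset
+//   int b = names("bar");             // next dense index
+//   NPNR_ASSERT(names("foo") == a);   // repeated lookups are stable
+//   const std::string &s = names[a];  // reverse lookup by index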
+
+template <typename K, typename OPS> class mfp
+{
+ mutable idict<K, 0, OPS> database;
+ mutable std::vector<int> parents;
+
+ public:
+ typedef typename idict<K, 0, OPS>::const_iterator const_iterator;
+
+ int operator()(const K &key) const
+ {
+ int i = database(key);
+ parents.resize(database.size(), -1);
+ return i;
+ }
+
+ const K &operator[](int index) const { return database[index]; }
+
+ int ifind(int i) const
+ {
+ int p = i, k = i;
+
+ while (parents[p] != -1)
+ p = parents[p];
+
+ while (k != p) {
+ int next_k = parents[k];
+ parents[k] = p;
+ k = next_k;
+ }
+
+ return p;
+ }
+
+ void imerge(int i, int j)
+ {
+ i = ifind(i);
+ j = ifind(j);
+
+ if (i != j)
+ parents[i] = j;
+ }
+
+ void ipromote(int i)
+ {
+ int k = i;
+
+ while (k != -1) {
+ int next_k = parents[k];
+ parents[k] = i;
+ k = next_k;
+ }
+
+ parents[i] = -1;
+ }
+
+ int lookup(const K &a) const { return ifind((*this)(a)); }
+
+ const K &find(const K &a) const
+ {
+ int i = database.at(a, -1);
+ if (i < 0)
+ return a;
+ return (*this)[ifind(i)];
+ }
+
+ void merge(const K &a, const K &b) { imerge((*this)(a), (*this)(b)); }
+
+ void promote(const K &a)
+ {
+ int i = database.at(a, -1);
+ if (i >= 0)
+ ipromote(i);
+ }
+
+ void swap(mfp &other)
+ {
+ database.swap(other.database);
+ parents.swap(other.parents);
+ }
+
+ void reserve(size_t n) { database.reserve(n); }
+ size_t size() const { return database.size(); }
+ bool empty() const { return database.empty(); }
+ void clear()
+ {
+ database.clear();
+ parents.clear();
+ }
+
+ const_iterator begin() const { return database.begin(); }
+ const_iterator element(int n) const { return database.element(n); }
+ const_iterator end() const { return database.end(); }
+};
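+
+// A minimal usage sketch (illustrative): mfp<> is a union-find ("merge-find") over
+// interned keys, with path compression in ifind() and an explicit promote() to pick
+// a set's representative.
+//
+//   mfp<std::string> classes;
+//   classes.merge("a", "b");
+//   classes.merge("b", "c");
+//   NPNR_ASSERT(classes.lookup("a") == classes.lookup("c"));  // same set
+//   classes.promote("a");   // "a" becomes the representative of its set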
+
+NEXTPNR_NAMESPACE_END
+
+#endif
diff --git a/common/kernel/idstring.cc b/common/kernel/idstring.cc
new file mode 100644
index 00000000..9e27ac6f
--- /dev/null
+++ b/common/kernel/idstring.cc
@@ -0,0 +1,51 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "idstring.h"
+
+#include "basectx.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+void IdString::set(const BaseCtx *ctx, const std::string &s)
+{
+ auto it = ctx->idstring_str_to_idx->find(s);
+ if (it == ctx->idstring_str_to_idx->end()) {
+ index = ctx->idstring_idx_to_str->size();
+ auto insert_rc = ctx->idstring_str_to_idx->insert({s, index});
+ ctx->idstring_idx_to_str->push_back(&insert_rc.first->first);
+ } else {
+ index = it->second;
+ }
+}
+
+const std::string &IdString::str(const BaseCtx *ctx) const { return *ctx->idstring_idx_to_str->at(index); }
+
+const char *IdString::c_str(const BaseCtx *ctx) const { return str(ctx).c_str(); }
+
+void IdString::initialize_add(const BaseCtx *ctx, const char *s, int idx)
+{
+ NPNR_ASSERT(ctx->idstring_str_to_idx->count(s) == 0);
+ NPNR_ASSERT(int(ctx->idstring_idx_to_str->size()) == idx);
+ auto insert_rc = ctx->idstring_str_to_idx->insert({s, idx});
+ ctx->idstring_idx_to_str->push_back(&insert_rc.first->first);
+}
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/idstring.h b/common/kernel/idstring.h
new file mode 100644
index 00000000..019e0a2a
--- /dev/null
+++ b/common/kernel/idstring.h
@@ -0,0 +1,75 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef IDSTRING_H
+#define IDSTRING_H
+
+#include <string>
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+struct BaseCtx;
+
+struct IdString
+{
+ int index;
+
+ static void initialize_arch(const BaseCtx *ctx);
+
+ static void initialize_add(const BaseCtx *ctx, const char *s, int idx);
+
+ constexpr IdString() : index(0) {}
+ explicit constexpr IdString(int index) : index(index) {}
+
+ void set(const BaseCtx *ctx, const std::string &s);
+
+ IdString(const BaseCtx *ctx, const std::string &s) { set(ctx, s); }
+
+ IdString(const BaseCtx *ctx, const char *s) { set(ctx, s); }
+
+ const std::string &str(const BaseCtx *ctx) const;
+
+ const char *c_str(const BaseCtx *ctx) const;
+
+ bool operator<(const IdString &other) const { return index < other.index; }
+
+ bool operator==(const IdString &other) const { return index == other.index; }
+
+ bool operator!=(const IdString &other) const { return index != other.index; }
+
+ bool empty() const { return index == 0; }
+
+ unsigned int hash() const { return index; }
+
+ template <typename... Args> bool in(Args... args) const
+ {
+ // Credit: https://articles.emptycrate.com/2016/05/14/folds_in_cpp11_ish.html
+ bool result = false;
+ (void)std::initializer_list<int>{(result = result || in(args), 0)...};
+ return result;
+ }
+
+ bool in(const IdString &rhs) const { return *this == rhs; }
+};
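+
+// A minimal usage sketch (illustrative; `ctx` is assumed to be a live BaseCtx/Context):
+// IdString is an interned handle, so equality is a plain integer compare and the text
+// is only recovered through the context that interned it.
+//
+//   IdString clk(ctx, "CLK");              // interns "CLK" in ctx
+//   IdString clk2(ctx, "CLK");             // same index as clk, so clk == clk2
+//   const char *txt = clk.c_str(ctx);      // text recovered via ctx
+//   bool clockish = clk.in(clk2, IdString(ctx, "CLK_N"));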
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* IDSTRING_H */
diff --git a/common/kernel/idstringlist.cc b/common/kernel/idstringlist.cc
new file mode 100644
index 00000000..624622cf
--- /dev/null
+++ b/common/kernel/idstringlist.cc
@@ -0,0 +1,80 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "idstringlist.h"
+#include "context.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+IdStringList IdStringList::parse(Context *ctx, const std::string &str)
+{
+ char delim = ctx->getNameDelimiter();
+ size_t id_count = std::count(str.begin(), str.end(), delim) + 1;
+ IdStringList list(id_count);
+ size_t start = 0;
+ for (size_t i = 0; i < id_count; i++) {
+ size_t end = str.find(delim, start);
+ NPNR_ASSERT((i == (id_count - 1)) || (end != std::string::npos));
+ list.ids[i] = ctx->id(str.substr(start, end - start));
+ start = end + 1;
+ }
+ return list;
+}
+
+void IdStringList::build_str(const Context *ctx, std::string &str) const
+{
+ char delim = ctx->getNameDelimiter();
+ bool first = true;
+ str.clear();
+ for (auto entry : ids) {
+ if (!first)
+ str += delim;
+ str += entry.str(ctx);
+ first = false;
+ }
+}
+
+std::string IdStringList::str(const Context *ctx) const
+{
+ std::string s;
+ build_str(ctx, s);
+ return s;
+}
+
+IdStringList IdStringList::concat(IdStringList a, IdStringList b)
+{
+ IdStringList result(a.size() + b.size());
+ for (size_t i = 0; i < a.size(); i++)
+ result.ids[i] = a[i];
+ for (size_t i = 0; i < b.size(); i++)
+ result.ids[a.size() + i] = b[i];
+ return result;
+}
+
+IdStringList IdStringList::slice(size_t s, size_t e) const
+{
+ NPNR_ASSERT(e >= s);
+ IdStringList result(e - s);
+ for (size_t i = 0; i < result.size(); i++)
+ result.ids[i] = ids[s + i];
+ return result;
+}
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/idstringlist.h b/common/kernel/idstringlist.h
new file mode 100644
index 00000000..5e462d0e
--- /dev/null
+++ b/common/kernel/idstringlist.h
@@ -0,0 +1,87 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef IDSTRING_LIST_H
+#define IDSTRING_LIST_H
+
+#include <boost/functional/hash.hpp>
+#include "hashlib.h"
+#include "idstring.h"
+#include "nextpnr_namespaces.h"
+#include "sso_array.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+struct Context;
+
+struct IdStringList
+{
+ SSOArray<IdString, 4> ids;
+
+ IdStringList() : ids(1, IdString()){};
+ explicit IdStringList(size_t n) : ids(n, IdString()){};
+ explicit IdStringList(IdString id) : ids(1, id){};
+ template <typename Tlist> explicit IdStringList(const Tlist &list) : ids(list){};
+
+ static IdStringList parse(Context *ctx, const std::string &str);
+ void build_str(const Context *ctx, std::string &str) const;
+ std::string str(const Context *ctx) const;
+
+ size_t size() const { return ids.size(); }
+ const IdString *begin() const { return ids.begin(); }
+ const IdString *end() const { return ids.end(); }
+ const IdString &operator[](size_t idx) const { return ids[idx]; }
+ bool operator==(const IdStringList &other) const { return ids == other.ids; }
+ bool operator!=(const IdStringList &other) const { return ids != other.ids; }
+ bool operator<(const IdStringList &other) const
+ {
+ if (size() > other.size())
+ return false;
+ if (size() < other.size())
+ return true;
+ for (size_t i = 0; i < size(); i++) {
+ IdString a = ids[i], b = other[i];
+ if (a.index < b.index)
+ return true;
+ if (a.index > b.index)
+ return false;
+ }
+ return false;
+ }
+
+ static IdStringList concat(IdStringList a, IdStringList b);
+ static IdStringList concat(IdString a, IdString b) { return concat(IdStringList(a), IdStringList(b)); }
+ static IdStringList concat(IdStringList a, IdString b) { return concat(a, IdStringList(b)); }
+ static IdStringList concat(IdString a, IdStringList b) { return concat(IdStringList(a), b); }
+
+ IdStringList slice(size_t s, size_t e) const;
+
+ unsigned int hash() const
+ {
+ unsigned int h = mkhash_init;
+ for (const auto &val : ids)
+ h = mkhash(h, val.hash());
+ return h;
+ }
+};
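+
+// A minimal usage sketch (illustrative; assumes '/' is the delimiter reported by
+// ctx->getNameDelimiter() for the arch in use):
+//
+//   IdStringList name = IdStringList::parse(ctx, "X3/Y5/lc0");
+//   IdString tile_x = name[0];                                   // "X3"
+//   IdStringList pin = IdStringList::concat(name, IdString(ctx, "I0"));
+//   std::string flat = pin.str(ctx);                             // "X3/Y5/lc0/I0"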
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* IDSTRING_LIST_H */
diff --git a/common/kernel/indexed_store.h b/common/kernel/indexed_store.h
new file mode 100644
index 00000000..df607c13
--- /dev/null
+++ b/common/kernel/indexed_store.h
@@ -0,0 +1,297 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2021-22 gatecat <gatecat@ds0.me>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef INDEXED_STORE_H
+#define INDEXED_STORE_H
+
+#include <algorithm>
+#include <limits>
+#include <type_traits>
+#include <vector>
+
+#include "nextpnr_assertions.h"
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+template <typename T> struct store_index
+{
+ int32_t m_index = -1;
+ store_index() = default;
+ explicit store_index(int32_t index) : m_index(index){};
+ int32_t idx() const { return m_index; }
+ void set(int32_t index) { m_index = index; }
+ bool empty() const { return m_index == -1; }
+ bool operator==(const store_index<T> &other) const { return m_index == other.m_index; }
+ bool operator!=(const store_index<T> &other) const { return m_index != other.m_index; }
+ bool operator<(const store_index<T> &other) const { return m_index < other.m_index; }
+ unsigned int hash() const { return m_index; }
+
+ operator bool() const { return !empty(); }
+ operator int() const = delete;
+ bool operator!() const { return empty(); }
+};
+
+// "Slotted" indexed object store
+template <typename T> class indexed_store
+{
+ private:
+ // This should move to using std::optional at some point
+ class slot
+ {
+ private:
+ alignas(T) unsigned char storage[sizeof(T)];
+ int32_t next_free;
+ bool active;
+ inline T &obj() { return reinterpret_cast<T &>(storage); }
+ inline const T &obj() const { return reinterpret_cast<const T &>(storage); }
+ friend class indexed_store<T>;
+
+ public:
+ slot() : next_free(std::numeric_limits<int32_t>::max()), active(false){};
+ slot(slot &&other) : next_free(other.next_free), active(other.active)
+ {
+ if (active)
+ ::new (static_cast<void *>(&storage)) T(std::move(other.obj()));
+ };
+
+ slot(const slot &other) : next_free(other.next_free), active(other.active)
+ {
+ if (active)
+ ::new (static_cast<void *>(&storage)) T(other.obj());
+ };
+
+ template <class... Args> void create(Args &&...args)
+ {
+ NPNR_ASSERT(!active);
+ active = true;
+ ::new (static_cast<void *>(&storage)) T(std::forward<Args &&>(args)...);
+ }
+ bool empty() const { return !active; }
+ T &get()
+ {
+ NPNR_ASSERT(active);
+ return reinterpret_cast<T &>(storage);
+ }
+ const T &get() const
+ {
+ NPNR_ASSERT(active);
+ return reinterpret_cast<const T &>(storage);
+ }
+ void free(int32_t first_free)
+ {
+ NPNR_ASSERT(active);
+ obj().~T();
+ active = false;
+ next_free = first_free;
+ }
+ ~slot()
+ {
+ if (active)
+ obj().~T();
+ }
+ };
+
+ std::vector<slot> slots;
+ int32_t first_free = 0;
+ int32_t active_count = 0;
+
+ public:
+ // Create a new entry and return its index
+ template <class... Args> store_index<T> add(Args &&...args)
+ {
+ ++active_count;
+ if (first_free == int32_t(slots.size())) {
+ slots.emplace_back();
+ slots.back().create(std::forward<Args &&>(args)...);
+ ++first_free;
+ return store_index<T>(int32_t(slots.size()) - 1);
+ } else {
+ int32_t idx = first_free;
+ auto &slot = slots.at(idx);
+ first_free = slot.next_free;
+ slot.create(std::forward<Args &&>(args)...);
+ return store_index<T>(idx);
+ }
+ }
+
+ // Remove an entry at an index
+ void remove(store_index<T> idx)
+ {
+ --active_count;
+ slots.at(idx.m_index).free(first_free);
+ first_free = idx.m_index;
+ }
+
+ void clear()
+ {
+ active_count = 0;
+ first_free = 0;
+ slots.clear();
+ }
+
+ // Number of live entries
+ int32_t entries() const { return active_count; }
+ bool empty() const { return (entries() == 0); }
+
+ // Reserve a certain amount of space
+ void reserve(int32_t size) { slots.reserve(size); }
+
+ // Check if an index exists
+ int32_t count(store_index<T> idx)
+ {
+ if (idx.m_index < 0 || idx.m_index >= int32_t(slots.size()))
+ return 0;
+ return slots.at(idx.m_index).empty() ? 0 : 1;
+ }
+
+ // Get an item by index
+ T &at(store_index<T> idx) { return slots.at(idx.m_index).get(); }
+ const T &at(store_index<T> idx) const { return slots.at(idx.m_index).get(); }
+ T &operator[](store_index<T> idx) { return slots.at(idx.m_index).get(); }
+ const T &operator[](store_index<T> idx) const { return slots.at(idx.m_index).get(); }
+
+ // Total size of the container
+ int32_t capacity() const { return int32_t(slots.size()); }
+
+ // Iterate over items
+ template <typename It, typename S> class enumerated_iterator;
+
+ class iterator
+ {
+ private:
+ indexed_store *base;
+ int32_t index = 0;
+
+ public:
+ iterator(indexed_store *base, int32_t index) : base(base), index(index){};
+ inline bool operator!=(const iterator &other) const { return other.index != index; }
+ inline bool operator==(const iterator &other) const { return other.index == index; }
+ inline iterator operator++()
+ {
+ // skip over unused slots
+ do {
+ index++;
+ } while (index < int32_t(base->slots.size()) && !base->slots.at(index).active);
+ return *this;
+ }
+ inline iterator operator++(int)
+ {
+ iterator prior(*this);
+ do {
+ index++;
+ } while (index < int32_t(base->slots.size()) && !base->slots.at(index).active);
+ return prior;
+ }
+ T &operator*() { return base->at(store_index<T>(index)); }
+ template <typename It, typename S> friend class indexed_store::enumerated_iterator;
+ };
+ iterator begin()
+ {
+ auto it = iterator{this, -1};
+ ++it;
+ return it;
+ }
+ iterator end() { return iterator{this, int32_t(slots.size())}; }
+
+ class const_iterator
+ {
+ private:
+ const indexed_store *base;
+ int32_t index = 0;
+
+ public:
+ const_iterator(const indexed_store *base, int32_t index) : base(base), index(index){};
+ inline bool operator!=(const const_iterator &other) const { return other.index != index; }
+ inline bool operator==(const const_iterator &other) const { return other.index == index; }
+ inline const_iterator operator++()
+ {
+ // skip over unused slots
+ do {
+ index++;
+ } while (index < int32_t(base->slots.size()) && !base->slots.at(index).active);
+ return *this;
+ }
+ inline const_iterator operator++(int)
+ {
+ const_iterator prior(*this);
+ do {
+ index++;
+ } while (index < int32_t(base->slots.size()) && !base->slots.at(index).active);
+ return prior;
+ }
+ const T &operator*() { return base->at(store_index<T>(index)); }
+ template <typename It, typename S> friend class indexed_store::enumerated_iterator;
+ };
+ const_iterator begin() const
+ {
+ auto it = const_iterator{this, -1};
+ ++it;
+ return it;
+ }
+ const_iterator end() const { return const_iterator{this, int32_t(slots.size())}; }
+
+ template <typename S> struct enumerated_item
+ {
+ enumerated_item(int32_t index, S &value) : index(index), value(value){};
+ store_index<std::remove_cv_t<S>> index;
+ S &value;
+ };
+
+ template <typename It, typename S> class enumerated_iterator
+ {
+ private:
+ It base;
+
+ public:
+ enumerated_iterator(const It &base) : base(base){};
+ inline bool operator!=(const enumerated_iterator<It, S> &other) const { return other.base != base; }
+ inline bool operator==(const enumerated_iterator<It, S> &other) const { return other.base == base; }
+ inline enumerated_iterator<It, S> operator++()
+ {
+ ++base;
+ return *this;
+ }
+ inline enumerated_iterator<It, S> operator++(int)
+ {
+ enumerated_iterator<It, S> prior(*this);
+ ++base;
+ return prior;
+ }
+ enumerated_item<S> operator*() { return enumerated_item<S>{base.index, *base}; }
+ };
+
+ template <typename It, typename S> struct enumerated_range
+ {
+ enumerated_range(const It &begin, const It &end) : m_begin(begin), m_end(end){};
+ enumerated_iterator<It, S> m_begin, m_end;
+ enumerated_iterator<It, S> begin() { return m_begin; }
+ enumerated_iterator<It, S> end() { return m_end; }
+ };
+
+ enumerated_range<iterator, T> enumerate() { return enumerated_range<iterator, T>{begin(), end()}; }
+ enumerated_range<const_iterator, const T> enumerate() const
+ {
+ return enumerated_range<const_iterator, const T>{begin(), end()};
+ }
+};
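+
+// A minimal usage sketch (illustrative; `Item` and `use()` are hypothetical):
+// indexed_store<T> hands out stable store_index<T> handles, recycles freed slots via
+// a free list, and iteration skips dead slots.
+//
+//   struct Item { int value; };
+//   indexed_store<Item> items;
+//   store_index<Item> a = items.add(Item{1});
+//   store_index<Item> b = items.add(Item{2});
+//   items.remove(a);                    // slot is reused by the next add()
+//   for (auto &it : items)              // live entries only
+//       it.value++;
+//   for (auto ent : items.enumerate())  // (store_index, reference) pairs
+//       use(ent.index, ent.value);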
+
+NEXTPNR_NAMESPACE_END
+
+#endif
diff --git a/common/kernel/log.cc b/common/kernel/log.cc
new file mode 100644
index 00000000..8b1ad43b
--- /dev/null
+++ b/common/kernel/log.cc
@@ -0,0 +1,198 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <list>
+#include <map>
+#include <set>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <vector>
+
+#include "log.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+NPNR_NORETURN void logv_error(const char *format, va_list ap) NPNR_ATTRIBUTE(noreturn);
+
+std::vector<std::pair<std::ostream *, LogLevel>> log_streams;
+log_write_type log_write_function = nullptr;
+
+std::string log_last_error;
+void (*log_error_atexit)() = NULL;
+
+dict<LogLevel, int, loglevel_hash_ops> message_count_by_level;
+static int log_newline_count = 0;
+bool had_nonfatal_error = false;
+
+std::string stringf(const char *fmt, ...)
+{
+ std::string string;
+ va_list ap;
+
+ va_start(ap, fmt);
+ string = vstringf(fmt, ap);
+ va_end(ap);
+
+ return string;
+}
+
+std::string vstringf(const char *fmt, va_list ap)
+{
+ std::string string;
+ char *str = NULL;
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+ int sz = 64 + strlen(fmt), rc;
+ while (1) {
+ va_list apc;
+ va_copy(apc, ap);
+ str = (char *)realloc(str, sz);
+ rc = vsnprintf(str, sz, fmt, apc);
+ va_end(apc);
+ if (rc >= 0 && rc < sz)
+ break;
+ sz *= 2;
+ }
+#else
+ if (vasprintf(&str, fmt, ap) < 0)
+ str = NULL;
+#endif
+
+ if (str != NULL) {
+ string = str;
+ free(str);
+ }
+
+ return string;
+}
+
+void logv(const char *format, va_list ap, LogLevel level = LogLevel::LOG_MSG)
+{
+ // Trim newlines from the beginning
+ while (format[0] == '\n' && format[1] != 0) {
+ log_always("\n");
+ format++;
+ }
+
+ std::string str = vstringf(format, ap);
+
+ if (str.empty())
+ return;
+
+ size_t nnl_pos = str.find_last_not_of('\n');
+ if (nnl_pos == std::string::npos)
+ log_newline_count += str.size();
+ else
+ log_newline_count = str.size() - nnl_pos - 1;
+
+ for (auto f : log_streams)
+ if (f.second <= level)
+ *f.first << str;
+ if (log_write_function)
+ log_write_function(str);
+}
+
+void log_with_level(LogLevel level, const char *format, ...)
+{
+ message_count_by_level[level]++;
+ va_list ap;
+ va_start(ap, format);
+ logv(format, ap, level);
+ va_end(ap);
+}
+
+void logv_prefixed(const char *prefix, const char *format, va_list ap, LogLevel level)
+{
+ std::string message = vstringf(format, ap);
+
+ log_with_level(level, "%s%s", prefix, message.c_str());
+ log_flush();
+}
+
+void log_always(const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ logv(format, ap, LogLevel::ALWAYS_MSG);
+ va_end(ap);
+}
+
+void log(const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ logv(format, ap, LogLevel::LOG_MSG);
+ va_end(ap);
+}
+
+void log_info(const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ logv_prefixed("Info: ", format, ap, LogLevel::INFO_MSG);
+ va_end(ap);
+}
+
+void log_warning(const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ logv_prefixed("Warning: ", format, ap, LogLevel::WARNING_MSG);
+ va_end(ap);
+}
+
+void log_error(const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ logv_prefixed("ERROR: ", format, ap, LogLevel::ERROR_MSG);
+
+ if (log_error_atexit)
+ log_error_atexit();
+
+ throw log_execution_error_exception();
+}
+
+void log_break()
+{
+ if (log_newline_count < 2)
+ log("\n");
+ if (log_newline_count < 2)
+ log("\n");
+}
+
+void log_nonfatal_error(const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ logv_prefixed("ERROR: ", format, ap, LogLevel::ERROR_MSG);
+ va_end(ap);
+ had_nonfatal_error = true;
+}
+
+void log_flush()
+{
+ for (auto f : log_streams)
+ f.first->flush();
+}
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/log.h b/common/kernel/log.h
new file mode 100644
index 00000000..0ac4edf5
--- /dev/null
+++ b/common/kernel/log.h
@@ -0,0 +1,92 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef LOG_H
+#define LOG_H
+
+#include <functional>
+#include <ostream>
+#include <set>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string>
+#include <vector>
+#include "hashlib.h"
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+typedef std::function<void(std::string)> log_write_type;
+
+struct log_cmd_error_exception
+{
+};
+
+struct log_execution_error_exception
+{
+};
+
+enum class LogLevel
+{
+ LOG_MSG,
+ INFO_MSG,
+ WARNING_MSG,
+ ERROR_MSG,
+ ALWAYS_MSG
+};
+
+struct loglevel_hash_ops
+{
+ static inline bool cmp(LogLevel a, LogLevel b) { return a == b; }
+ static inline unsigned int hash(LogLevel a) { return unsigned(a); }
+};
+
+extern std::vector<std::pair<std::ostream *, LogLevel>> log_streams;
+extern log_write_type log_write_function;
+
+extern std::string log_last_error;
+extern void (*log_error_atexit)();
+extern bool had_nonfatal_error;
+extern dict<LogLevel, int, loglevel_hash_ops> message_count_by_level;
+
+std::string stringf(const char *fmt, ...);
+std::string vstringf(const char *fmt, va_list ap);
+
+void log(const char *format, ...) NPNR_ATTRIBUTE(format(printf, 1, 2));
+void log_always(const char *format, ...) NPNR_ATTRIBUTE(format(printf, 1, 2));
+void log_info(const char *format, ...) NPNR_ATTRIBUTE(format(printf, 1, 2));
+void log_warning(const char *format, ...) NPNR_ATTRIBUTE(format(printf, 1, 2));
+NPNR_NORETURN void log_error(const char *format, ...) NPNR_ATTRIBUTE(format(printf, 1, 2), noreturn);
+void log_nonfatal_error(const char *format, ...) NPNR_ATTRIBUTE(format(printf, 1, 2));
+void log_break();
+void log_flush();
+
+static inline void log_assert_worker(bool cond, const char *expr, const char *file, int line)
+{
+ if (!cond)
+ log_error("Assert `%s' failed in %s:%d.\n", expr, file, line);
+}
+#define log_assert(_assert_expr_) \
+ NEXTPNR_NAMESPACE_PREFIX log_assert_worker(_assert_expr_, #_assert_expr_, __FILE__, __LINE__)
+
+#define log_abort() log_error("Abort in %s:%d.\n", __FILE__, __LINE__)
+
+NEXTPNR_NAMESPACE_END
+
+#endif
diff --git a/common/kernel/nextpnr.cc b/common/kernel/nextpnr.cc
new file mode 100644
index 00000000..8c902d88
--- /dev/null
+++ b/common/kernel/nextpnr.cc
@@ -0,0 +1,35 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#if defined(__wasm)
+#include <exception>
+#include <typeinfo>
+#include "log.h"
+
+extern "C" {
+// FIXME: WASI does not currently support exceptions.
+void *__cxa_allocate_exception(size_t thrown_size) throw() { return malloc(thrown_size); }
+bool __cxa_uncaught_exception() throw();
+void __cxa_throw(void *thrown_exception, struct std::type_info *tinfo, void (*dest)(void *)) { std::terminate(); }
+}
+
+namespace boost {
+void throw_exception(std::exception const &e) { NEXTPNR_NAMESPACE::log_error("boost::exception(): %s\n", e.what()); }
+} // namespace boost
+#endif
diff --git a/common/kernel/nextpnr.h b/common/kernel/nextpnr.h
new file mode 100644
index 00000000..3b65900b
--- /dev/null
+++ b/common/kernel/nextpnr.h
@@ -0,0 +1,29 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+#ifndef NEXTPNR_H
+#define NEXTPNR_H
+
+#include "base_arch.h"
+#include "context.h"
+#include "nextpnr_assertions.h"
+#include "nextpnr_namespaces.h"
+#include "nextpnr_types.h"
+
+#endif
diff --git a/common/kernel/nextpnr_assertions.cc b/common/kernel/nextpnr_assertions.cc
new file mode 100644
index 00000000..ac4cdf57
--- /dev/null
+++ b/common/kernel/nextpnr_assertions.cc
@@ -0,0 +1,33 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "nextpnr_assertions.h"
+#include "log.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+assertion_failure::assertion_failure(std::string msg, std::string expr_str, std::string filename, int line)
+ : runtime_error("Assertion failure: " + msg + " (" + filename + ":" + std::to_string(line) + ")"), msg(msg),
+ expr_str(expr_str), filename(filename), line(line)
+{
+ log_flush();
+}
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/nextpnr_assertions.h b/common/kernel/nextpnr_assertions.h
new file mode 100644
index 00000000..1989aa3a
--- /dev/null
+++ b/common/kernel/nextpnr_assertions.h
@@ -0,0 +1,64 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef NEXTPNR_ASSERTIONS_H
+#define NEXTPNR_ASSERTIONS_H
+
+#include <stdexcept>
+#include <string>
+
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+class assertion_failure : public std::runtime_error
+{
+ public:
+ assertion_failure(std::string msg, std::string expr_str, std::string filename, int line);
+
+ std::string msg;
+ std::string expr_str;
+ std::string filename;
+ int line;
+};
+
+NPNR_NORETURN
+inline void assert_fail_impl(const char *message, const char *expr_str, const char *filename, int line)
+{
+ throw assertion_failure(message, expr_str, filename, line);
+}
+
+NPNR_NORETURN
+inline void assert_fail_impl_str(std::string message, const char *expr_str, const char *filename, int line)
+{
+ throw assertion_failure(message, expr_str, filename, line);
+}
+
+#define NPNR_ASSERT(cond) (!(cond) ? assert_fail_impl(#cond, #cond, __FILE__, __LINE__) : (void)true)
+#define NPNR_ASSERT_MSG(cond, msg) (!(cond) ? assert_fail_impl(msg, #cond, __FILE__, __LINE__) : (void)true)
+#define NPNR_ASSERT_FALSE(msg) (assert_fail_impl(msg, "false", __FILE__, __LINE__))
+#define NPNR_ASSERT_FALSE_STR(msg) (assert_fail_impl_str(msg, "false", __FILE__, __LINE__))
+
+#define NPNR_STRINGIFY_MACRO(x) NPNR_STRINGIFY(x)
+#define NPNR_STRINGIFY(x) #x
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* NEXTPNR_ASSERTIONS_H */
diff --git a/common/kernel/nextpnr_base_types.h b/common/kernel/nextpnr_base_types.h
new file mode 100644
index 00000000..944bf0b8
--- /dev/null
+++ b/common/kernel/nextpnr_base_types.h
@@ -0,0 +1,135 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+// These are the nextpnr types that do **not** depend on user defined types,
+// like BelId, etc.
+//
+// If a common type is required that depends on one of the user defined types,
+// add it to nextpnr_types.h, which includes "archdefs.h", or make a new
+// header that includes "archdefs.h"
+#ifndef NEXTPNR_BASE_TYPES_H
+#define NEXTPNR_BASE_TYPES_H
+
+#include <boost/functional/hash.hpp>
+#include <string>
+
+#include "hashlib.h"
+#include "idstring.h"
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+struct GraphicElement
+{
+ enum type_t
+ {
+ TYPE_NONE,
+ TYPE_LINE,
+ TYPE_ARROW,
+ TYPE_BOX,
+ TYPE_CIRCLE,
+ TYPE_LABEL,
+ TYPE_LOCAL_ARROW, // Located entirely within the cell boundaries, coordinates in the range [0., 1.]
+ TYPE_LOCAL_LINE,
+
+ TYPE_MAX
+ } type = TYPE_NONE;
+
+ enum style_t
+ {
+ STYLE_GRID,
+ STYLE_FRAME, // Static "frame". Contrast between STYLE_INACTIVE and STYLE_ACTIVE
+ STYLE_HIDDEN, // Only display when object is selected or highlighted
+ STYLE_INACTIVE, // Render using low-contrast color
+ STYLE_ACTIVE, // Render using high-contrast color
+
+ // UI highlight groups
+ STYLE_HIGHLIGHTED0,
+ STYLE_HIGHLIGHTED1,
+ STYLE_HIGHLIGHTED2,
+ STYLE_HIGHLIGHTED3,
+ STYLE_HIGHLIGHTED4,
+ STYLE_HIGHLIGHTED5,
+ STYLE_HIGHLIGHTED6,
+ STYLE_HIGHLIGHTED7,
+
+ STYLE_SELECTED,
+ STYLE_HOVER,
+
+ STYLE_MAX
+ } style = STYLE_FRAME;
+
+ float x1 = 0, y1 = 0, x2 = 0, y2 = 0, z = 0;
+ std::string text;
+ GraphicElement(){};
+ GraphicElement(type_t type, style_t style, float x1, float y1, float x2, float y2, float z)
+ : type(type), style(style), x1(x1), y1(y1), x2(x2), y2(y2), z(z){};
+};
+
+struct Loc
+{
+ int x = -1, y = -1, z = -1;
+
+ Loc() {}
+ Loc(int x, int y, int z) : x(x), y(y), z(z) {}
+
+ bool operator==(const Loc &other) const { return (x == other.x) && (y == other.y) && (z == other.z); }
+ bool operator!=(const Loc &other) const { return (x != other.x) || (y != other.y) || (z != other.z); }
+ unsigned int hash() const { return mkhash(x, mkhash(y, z)); }
+};
+
+struct ArcBounds
+{
+ int x0 = -1, y0 = -1, x1 = -1, y1 = -1;
+
+ ArcBounds() {}
+ ArcBounds(int x0, int y0, int x1, int y1) : x0(x0), y0(y0), x1(x1), y1(y1){};
+
+ int distance(Loc loc) const
+ {
+ int dist = 0;
+ if (loc.x < x0)
+ dist += x0 - loc.x;
+ if (loc.x > x1)
+ dist += loc.x - x1;
+ if (loc.y < y0)
+ dist += y0 - loc.y;
+ if (loc.y > y1)
+ dist += loc.y - y1;
+ return dist;
+ };
+
+ bool contains(int x, int y) const { return x >= x0 && y >= y0 && x <= x1 && y <= y1; }
+};
+
+enum PlaceStrength
+{
+ STRENGTH_NONE = 0,
+ STRENGTH_WEAK = 1,
+ STRENGTH_STRONG = 2,
+ STRENGTH_PLACER = 3,
+ STRENGTH_FIXED = 4,
+ STRENGTH_LOCKED = 5,
+ STRENGTH_USER = 6
+};
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* NEXTPNR_BASE_TYPES_H */
diff --git a/common/kernel/nextpnr_namespaces.cc b/common/kernel/nextpnr_namespaces.cc
new file mode 100644
index 00000000..802c89b4
--- /dev/null
+++ b/common/kernel/nextpnr_namespaces.cc
@@ -0,0 +1,23 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+// This cc file exists to ensure that "nextpnr_namespaces.h" can be compiled
+// on its own.
+#include "nextpnr_namespaces.h"
diff --git a/common/kernel/nextpnr_namespaces.h b/common/kernel/nextpnr_namespaces.h
new file mode 100644
index 00000000..b758d7c5
--- /dev/null
+++ b/common/kernel/nextpnr_namespaces.h
@@ -0,0 +1,58 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef NEXTPNR_NAMESPACES_H
+#define NEXTPNR_NAMESPACES_H
+
+#ifdef NEXTPNR_NAMESPACE
+#define NEXTPNR_NAMESPACE_PREFIX NEXTPNR_NAMESPACE::
+#define NEXTPNR_NAMESPACE_BEGIN namespace NEXTPNR_NAMESPACE {
+#define NEXTPNR_NAMESPACE_END }
+#define USING_NEXTPNR_NAMESPACE using namespace NEXTPNR_NAMESPACE;
+#else
+#define NEXTPNR_NAMESPACE_PREFIX
+#define NEXTPNR_NAMESPACE_BEGIN
+#define NEXTPNR_NAMESPACE_END
+#define USING_NEXTPNR_NAMESPACE
+#endif
+
+#define NPNR_UNUSED(x) ((void)x)
+
+#if defined(__GNUC__) || defined(__clang__)
+#define NPNR_ATTRIBUTE(...) __attribute__((__VA_ARGS__))
+#define NPNR_NORETURN __attribute__((noreturn))
+#define NPNR_DEPRECATED __attribute__((deprecated))
+#define NPNR_PACKED_STRUCT(...) __VA_ARGS__ __attribute__((packed))
+#define NPNR_ALWAYS_INLINE NPNR_ATTRIBUTE(__always_inline__)
+#elif defined(_MSC_VER)
+#define NPNR_ATTRIBUTE(...)
+#define NPNR_NORETURN __declspec(noreturn)
+#define NPNR_DEPRECATED __declspec(deprecated)
+#define NPNR_PACKED_STRUCT(...) __pragma(pack(push, 1)) __VA_ARGS__ __pragma(pack(pop))
+#define NPNR_ALWAYS_INLINE
+#else
+#define NPNR_ATTRIBUTE(...)
+#define NPNR_NORETURN
+#define NPNR_DEPRECATED
+#define NPNR_PACKED_STRUCT(...) __VA_ARGS__
+#define NPNR_ALWAYS_INLINE
+#endif
+
+#endif /* NEXTPNR_NAMESPACES_H */
diff --git a/common/kernel/nextpnr_types.cc b/common/kernel/nextpnr_types.cc
new file mode 100644
index 00000000..57d816c0
--- /dev/null
+++ b/common/kernel/nextpnr_types.cc
@@ -0,0 +1,180 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "nextpnr_types.h"
+#include "context.h"
+#include "log.h"
+
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+void CellInfo::addInput(IdString name)
+{
+ ports[name].name = name;
+ ports[name].type = PORT_IN;
+}
+void CellInfo::addOutput(IdString name)
+{
+ ports[name].name = name;
+ ports[name].type = PORT_OUT;
+}
+void CellInfo::addInout(IdString name)
+{
+ ports[name].name = name;
+ ports[name].type = PORT_INOUT;
+}
+
+void CellInfo::setParam(IdString name, Property value) { params[name] = value; }
+void CellInfo::unsetParam(IdString name) { params.erase(name); }
+void CellInfo::setAttr(IdString name, Property value) { attrs[name] = value; }
+void CellInfo::unsetAttr(IdString name) { attrs.erase(name); }
+
+bool CellInfo::testRegion(BelId bel) const
+{
+ return region == nullptr || !region->constr_bels || region->bels.count(bel);
+}
+
+void CellInfo::connectPort(IdString port_name, NetInfo *net)
+{
+ if (net == nullptr)
+ return;
+ PortInfo &port = ports.at(port_name);
+ NPNR_ASSERT(port.net == nullptr);
+ port.net = net;
+ if (port.type == PORT_OUT) {
+ NPNR_ASSERT(net->driver.cell == nullptr);
+ net->driver.cell = this;
+ net->driver.port = port_name;
+ } else if (port.type == PORT_IN || port.type == PORT_INOUT) {
+ PortRef user;
+ user.cell = this;
+ user.port = port_name;
+ port.user_idx = net->users.add(user);
+ } else {
+ NPNR_ASSERT_FALSE("invalid port type for connect_port");
+ }
+}
+
+void CellInfo::disconnectPort(IdString port_name)
+{
+ if (!ports.count(port_name))
+ return;
+ PortInfo &port = ports.at(port_name);
+ if (port.net != nullptr) {
+ if (port.user_idx)
+ port.net->users.remove(port.user_idx);
+ if (port.net->driver.cell == this && port.net->driver.port == port_name)
+ port.net->driver.cell = nullptr;
+ port.net = nullptr;
+ }
+}
+
+void CellInfo::connectPorts(IdString port, CellInfo *other, IdString other_port)
+{
+ PortInfo &port1 = ports.at(port);
+ if (port1.net == nullptr) {
+ // No net on port1; need to create one
+ NetInfo *p1net = ctx->createNet(ctx->id(name.str(ctx) + "$conn$" + port.str(ctx)));
+ connectPort(port, p1net);
+ }
+ other->connectPort(other_port, port1.net);
+}
+
+void CellInfo::movePortTo(IdString port, CellInfo *other, IdString other_port)
+{
+ if (!ports.count(port))
+ return;
+ PortInfo &old = ports.at(port);
+
+ // Create port on the replacement cell if it doesn't already exist
+ if (!other->ports.count(other_port)) {
+ other->ports[other_port].name = other_port;
+ other->ports[other_port].type = old.type;
+ }
+
+ PortInfo &rep = other->ports.at(other_port);
+ NPNR_ASSERT(old.type == rep.type);
+
+ rep.net = old.net;
+ rep.user_idx = old.user_idx;
+ old.net = nullptr;
+ old.user_idx = store_index<PortRef>{};
+ if (rep.type == PORT_OUT) {
+ if (rep.net != nullptr) {
+ rep.net->driver.cell = other;
+ rep.net->driver.port = other_port;
+ }
+ } else if (rep.type == PORT_IN) {
+ if (rep.net != nullptr) {
+ auto &load = rep.net->users.at(rep.user_idx);
+ load.cell = other;
+ load.port = other_port;
+ }
+ } else {
+ NPNR_ASSERT(false);
+ }
+}
+
+void CellInfo::renamePort(IdString old_name, IdString new_name)
+{
+ if (!ports.count(old_name))
+ return;
+ PortInfo pi = ports.at(old_name);
+ if (pi.net != nullptr) {
+ if (pi.net->driver.cell == this && pi.net->driver.port == old_name)
+ pi.net->driver.port = new_name;
+ if (pi.user_idx)
+ pi.net->users.at(pi.user_idx).port = new_name;
+ }
+ ports.erase(old_name);
+ pi.name = new_name;
+ ports[new_name] = pi;
+}
+
+void CellInfo::movePortBusTo(IdString old_name, int old_offset, bool old_brackets, CellInfo *new_cell,
+ IdString new_name, int new_offset, bool new_brackets, int width)
+{
+ for (int i = 0; i < width; i++) {
+ IdString old_port = ctx->id(stringf(old_brackets ? "%s[%d]" : "%s%d", old_name.c_str(ctx), i + old_offset));
+ IdString new_port = ctx->id(stringf(new_brackets ? "%s[%d]" : "%s%d", new_name.c_str(ctx), i + new_offset));
+ movePortTo(old_port, new_cell, new_port);
+ }
+}
+
+void CellInfo::copyPortTo(IdString port, CellInfo *other, IdString other_port)
+{
+ if (!ports.count(port))
+ return;
+ other->ports[other_port].name = other_port;
+ other->ports[other_port].type = ports.at(port).type;
+ other->connectPort(other_port, ports.at(port).net);
+}
+
+void CellInfo::copyPortBusTo(IdString old_name, int old_offset, bool old_brackets, CellInfo *new_cell,
+ IdString new_name, int new_offset, bool new_brackets, int width)
+{
+ for (int i = 0; i < width; i++) {
+ IdString old_port = ctx->id(stringf(old_brackets ? "%s[%d]" : "%s%d", old_name.c_str(ctx), i + old_offset));
+ IdString new_port = ctx->id(stringf(new_brackets ? "%s[%d]" : "%s%d", new_name.c_str(ctx), i + new_offset));
+ copyPortTo(old_port, new_cell, new_port);
+ }
+}
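+
+// A minimal usage sketch of the bus helpers above (cell and port names are
+// placeholders, not taken from any real netlist): the bracket flags choose
+// between "NAME[i]" and "NAMEi" port naming, so a bus D[0]..D[3] on cell `c`
+// could be moved onto ports DIN0..DIN3 of cell `ff` with:
+//
+//   c->movePortBusTo(ctx->id("D"), 0, /*old_brackets=*/true,
+//                    ff, ctx->id("DIN"), 0, /*new_brackets=*/false, /*width=*/4);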
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/nextpnr_types.h b/common/kernel/nextpnr_types.h
new file mode 100644
index 00000000..c21182cc
--- /dev/null
+++ b/common/kernel/nextpnr_types.h
@@ -0,0 +1,364 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+// Types defined in this header use one or more user-defined types (e.g. BelId).
+// If a new common type is desired that doesn't depend on a user-defined type,
+// either put it in its own header, or in nextpnr_base_types.h.
+#ifndef NEXTPNR_TYPES_H
+#define NEXTPNR_TYPES_H
+
+#include <unordered_map>
+#include <unordered_set>
+
+#include "archdefs.h"
+#include "hashlib.h"
+#include "indexed_store.h"
+#include "nextpnr_base_types.h"
+#include "nextpnr_namespaces.h"
+#include "property.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+struct DecalXY
+{
+ DecalId decal;
+ float x = 0, y = 0;
+
+ bool operator==(const DecalXY &other) const { return (decal == other.decal && x == other.x && y == other.y); }
+};
+
+struct BelPin
+{
+ BelId bel;
+ IdString pin;
+};
+
+struct Region
+{
+ IdString name;
+
+ bool constr_bels = false;
+ bool constr_wires = false;
+ bool constr_pips = false;
+
+ pool<BelId> bels;
+ pool<WireId> wires;
+ pool<Loc> piplocs;
+};
+
+struct PipMap
+{
+ PipId pip = PipId();
+ PlaceStrength strength = STRENGTH_NONE;
+};
+
+struct CellInfo;
+
+struct PortRef
+{
+ CellInfo *cell = nullptr;
+ IdString port;
+ delay_t budget = 0;
+};
+
+// minimum and maximum delay
+struct DelayPair
+{
+ DelayPair(){};
+ explicit DelayPair(delay_t delay) : min_delay(delay), max_delay(delay){};
+ DelayPair(delay_t min_delay, delay_t max_delay) : min_delay(min_delay), max_delay(max_delay){};
+ delay_t minDelay() const { return min_delay; };
+ delay_t maxDelay() const { return max_delay; };
+ delay_t min_delay, max_delay;
+ DelayPair operator+(const DelayPair &other) const
+ {
+ return {min_delay + other.min_delay, max_delay + other.max_delay};
+ }
+ DelayPair operator-(const DelayPair &other) const
+ {
+ return {min_delay - other.min_delay, max_delay - other.max_delay};
+ }
+};
+
+// four-quadrant, min and max rise and fall delay
+struct DelayQuad
+{
+ DelayPair rise, fall;
+ DelayQuad(){};
+ explicit DelayQuad(delay_t delay) : rise(delay), fall(delay){};
+ DelayQuad(delay_t min_delay, delay_t max_delay) : rise(min_delay, max_delay), fall(min_delay, max_delay){};
+ DelayQuad(DelayPair rise, DelayPair fall) : rise(rise), fall(fall){};
+ DelayQuad(delay_t min_rise, delay_t max_rise, delay_t min_fall, delay_t max_fall)
+ : rise(min_rise, max_rise), fall(min_fall, max_fall){};
+
+ delay_t minRiseDelay() const { return rise.minDelay(); };
+ delay_t maxRiseDelay() const { return rise.maxDelay(); };
+ delay_t minFallDelay() const { return fall.minDelay(); };
+ delay_t maxFallDelay() const { return fall.maxDelay(); };
+ delay_t minDelay() const { return std::min<delay_t>(rise.minDelay(), fall.minDelay()); };
+ delay_t maxDelay() const { return std::max<delay_t>(rise.maxDelay(), fall.maxDelay()); };
+
+ DelayPair delayPair() const { return DelayPair(minDelay(), maxDelay()); };
+
+ DelayQuad operator+(const DelayQuad &other) const { return {rise + other.rise, fall + other.fall}; }
+ DelayQuad operator-(const DelayQuad &other) const { return {rise - other.rise, fall - other.fall}; }
+};
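+
+// A small sketch of the semantics above (arbitrary illustrative values):
+//
+//   DelayQuad dq(/*min_rise=*/2, /*max_rise=*/5, /*min_fall=*/3, /*max_fall=*/7);
+//   NPNR_ASSERT(dq.minDelay() == 2 && dq.maxDelay() == 7);
+//   DelayQuad shifted = dq + DelayQuad(1); // adds 1 to all four corners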
+
+struct ClockConstraint;
+
+struct NetInfo : ArchNetInfo
+{
+ explicit NetInfo(IdString name) : name(name){};
+ IdString name, hierpath;
+ int32_t udata = 0;
+
+ PortRef driver;
+ indexed_store<PortRef> users;
+ dict<IdString, Property> attrs;
+
+ // wire -> uphill_pip
+ dict<WireId, PipMap> wires;
+
+ std::vector<IdString> aliases; // entries in net_aliases that point to this net
+
+ std::unique_ptr<ClockConstraint> clkconstr;
+
+ Region *region = nullptr;
+};
+
+enum PortType
+{
+ PORT_IN = 0,
+ PORT_OUT = 1,
+ PORT_INOUT = 2
+};
+
+struct PortInfo
+{
+ IdString name;
+ NetInfo *net = nullptr;
+ PortType type;
+ store_index<PortRef> user_idx{};
+};
+
+struct Context;
+
+struct CellInfo : ArchCellInfo
+{
+ CellInfo(Context *ctx, IdString name, IdString type) : ctx(ctx), name(name), type(type){};
+ Context *ctx = nullptr;
+
+ IdString name, type, hierpath;
+ int32_t udata = 0;
+
+ dict<IdString, PortInfo> ports;
+ dict<IdString, Property> attrs, params;
+
+ BelId bel;
+ PlaceStrength belStrength = STRENGTH_NONE;
+
+ // cell is part of a cluster if cluster != ClusterId()
+ ClusterId cluster;
+
+ Region *region = nullptr;
+
+ void addInput(IdString name);
+ void addOutput(IdString name);
+ void addInout(IdString name);
+
+ void setParam(IdString name, Property value);
+ void unsetParam(IdString name);
+ void setAttr(IdString name, Property value);
+ void unsetAttr(IdString name);
+ // check whether a bel complies with the cell's region constraint
+ bool testRegion(BelId bel) const;
+
+ NetInfo *getPort(IdString name)
+ {
+ auto found = ports.find(name);
+ return (found == ports.end()) ? nullptr : found->second.net;
+ }
+ const NetInfo *getPort(IdString name) const
+ {
+ auto found = ports.find(name);
+ return (found == ports.end()) ? nullptr : found->second.net;
+ }
+ void connectPort(IdString port, NetInfo *net);
+ void disconnectPort(IdString port);
+ void connectPorts(IdString port, CellInfo *other, IdString other_port);
+ void movePortTo(IdString port, CellInfo *other, IdString other_port);
+ void renamePort(IdString old_name, IdString new_name);
+ void movePortBusTo(IdString old_name, int old_offset, bool old_brackets, CellInfo *new_cell, IdString new_name,
+ int new_offset, bool new_brackets, int width);
+ void copyPortTo(IdString port, CellInfo *other, IdString other_port);
+ void copyPortBusTo(IdString old_name, int old_offset, bool old_brackets, CellInfo *new_cell, IdString new_name,
+ int new_offset, bool new_brackets, int width);
+};
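+
+// A minimal usage sketch (assuming a valid Context *ctx with the usual
+// createCell() helper; cell and port names below are placeholders): creating
+// two cells and wiring them together with the helpers above.
+//
+//   CellInfo *lut = ctx->createCell(ctx->id("lut0"), ctx->id("LUT4"));
+//   CellInfo *dff = ctx->createCell(ctx->id("dff0"), ctx->id("DFF"));
+//   lut->addOutput(ctx->id("F"));
+//   dff->addInput(ctx->id("D"));
+//   lut->connectPorts(ctx->id("F"), dff, ctx->id("D")); // creates net "lut0$conn$F"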
+
+enum TimingPortClass
+{
+ TMG_CLOCK_INPUT, // Clock input to a sequential cell
+ TMG_GEN_CLOCK, // Generated clock output (PLL, DCC, etc)
+ TMG_REGISTER_INPUT, // Input to a register, with an associated clock (may also have comb. fanout too)
+ TMG_REGISTER_OUTPUT, // Output from a register
+ TMG_COMB_INPUT, // Combinational input, no paths end here
+ TMG_COMB_OUTPUT, // Combinational output, no paths start here
+ TMG_STARTPOINT, // Unclocked primary startpoint, such as an IO cell output
+ TMG_ENDPOINT, // Unclocked primary endpoint, such as an IO cell input
+ TMG_IGNORE, // Asynchronous to all clocks, "don't care", and should be ignored (false path) for analysis
+};
+
+enum ClockEdge
+{
+ RISING_EDGE,
+ FALLING_EDGE
+};
+
+struct TimingClockingInfo
+{
+ IdString clock_port; // Port name of clock domain
+ ClockEdge edge;
+ DelayPair setup, hold; // Input timing checks
+ DelayQuad clockToQ; // Output clock-to-Q time
+};
+
+struct ClockConstraint
+{
+ DelayPair high;
+ DelayPair low;
+ DelayPair period;
+};
+
+struct ClockFmax
+{
+ float achieved;
+ float constraint;
+};
+
+struct ClockEvent
+{
+ IdString clock;
+ ClockEdge edge;
+
+ bool operator==(const ClockEvent &other) const { return clock == other.clock && edge == other.edge; }
+ unsigned int hash() const { return mkhash(clock.hash(), int(edge)); }
+};
+
+struct ClockPair
+{
+ ClockEvent start, end;
+
+ bool operator==(const ClockPair &other) const { return start == other.start && end == other.end; }
+ unsigned int hash() const { return mkhash(start.hash(), end.hash()); }
+};
+
+struct CriticalPath
+{
+ struct Segment
+ {
+
+ // Segment type
+ enum class Type
+ {
+ CLK_TO_Q, // Clock-to-Q delay
+ SOURCE, // Delayless source
+ LOGIC, // Combinational logic delay
+ ROUTING, // Routing delay
+ SETUP // Setup time in sink
+ };
+
+ // Type
+ Type type;
+ // Net name (routing only)
+ IdString net;
+ // From cell.port
+ std::pair<IdString, IdString> from;
+ // To cell.port
+ std::pair<IdString, IdString> to;
+ // Segment delay
+ delay_t delay;
+ // Segment budget (routing only)
+ delay_t budget;
+ };
+
+ // Clock pair
+ ClockPair clock_pair;
+ // Total path delay
+ delay_t delay;
+ // Period (max allowed delay)
+ delay_t period;
+ // Individual path segments
+ std::vector<Segment> segments;
+};
+
+// Holds timing information of a single source to sink path of a net
+struct NetSinkTiming
+{
+ // Clock event pair
+ ClockPair clock_pair;
+ // Cell and port (the sink)
+ std::pair<IdString, IdString> cell_port;
+ // Delay
+ delay_t delay;
+ // Delay budget
+ delay_t budget;
+};
+
+struct TimingResult
+{
+ // Achieved and target Fmax for all clock domains
+ dict<IdString, ClockFmax> clock_fmax;
+ // Single domain critical paths
+ dict<IdString, CriticalPath> clock_paths;
+ // Cross-domain critical paths
+ std::vector<CriticalPath> xclock_paths;
+
+ // Detailed net timing data
+ dict<IdString, std::vector<NetSinkTiming>> detailed_net_timings;
+};
+
+// Represents the contents of a non-leaf cell in a design
+// with hierarchy
+
+struct HierarchicalPort
+{
+ IdString name;
+ PortType dir;
+ std::vector<IdString> nets;
+ int offset;
+ bool upto;
+};
+
+struct HierarchicalCell
+{
+ IdString name, type, parent, fullpath;
+ // Name inside cell instance -> global name
+ dict<IdString, IdString> leaf_cells, nets;
+ // Global name -> name inside cell instance
+ dict<IdString, IdString> leaf_cells_by_gname, nets_by_gname;
+ // Cell port to net
+ dict<IdString, HierarchicalPort> ports;
+ // Name inside cell instance -> global name
+ dict<IdString, IdString> hier_cells;
+};
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* NEXTPNR_TYPES_H */
diff --git a/common/kernel/property.cc b/common/kernel/property.cc
new file mode 100644
index 00000000..6c30436d
--- /dev/null
+++ b/common/kernel/property.cc
@@ -0,0 +1,80 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "property.h"
+
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+Property::Property() : is_string(false), str(""), intval(0) {}
+
+Property::Property(int64_t intval, int width) : is_string(false), intval(intval)
+{
+ str.reserve(width);
+ for (int i = 0; i < width; i++)
+ str.push_back((intval & (1ULL << i)) ? S1 : S0);
+}
+
+Property::Property(const std::string &strval) : is_string(true), str(strval), intval(0xDEADBEEF) {}
+
+Property::Property(State bit) : is_string(false), str(std::string("") + char(bit)), intval(bit == S1) {}
+
+std::string Property::to_string() const
+{
+ if (is_string) {
+ std::string result = str;
+ int state = 0;
+ for (char c : str) {
+ if (state == 0) {
+ if (c == '0' || c == '1' || c == 'x' || c == 'z')
+ state = 0;
+ else if (c == ' ')
+ state = 1;
+ else
+ state = 2;
+ } else if (state == 1 && c != ' ')
+ state = 2;
+ }
+ if (state < 2)
+ result += " ";
+ return result;
+ } else {
+ return std::string(str.rbegin(), str.rend());
+ }
+}
+
+Property Property::from_string(const std::string &s)
+{
+ Property p;
+
+ size_t cursor = s.find_first_not_of("01xz");
+ if (cursor == std::string::npos) {
+ p.str = std::string(s.rbegin(), s.rend());
+ p.is_string = false;
+ p.update_intval();
+ } else if (s.find_first_not_of(' ', cursor) == std::string::npos) {
+ p = Property(s.substr(0, s.size() - 1));
+ } else {
+ p = Property(s);
+ }
+ return p;
+}
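+
+// A short sketch of the escaping rule (illustrative values): a literal string
+// that happens to look like a bit pattern gains a trailing space when
+// serialised, so it round-trips as a string rather than a number.
+//
+//   Property p("0101");                    // string property
+//   std::string s = p.to_string();         // "0101 " -- escaped with a trailing space
+//   Property q = Property::from_string(s); // q.is_string == true, q.str == "0101"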
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/property.h b/common/kernel/property.h
new file mode 100644
index 00000000..814b2cac
--- /dev/null
+++ b/common/kernel/property.h
@@ -0,0 +1,131 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef PROPERTY_H
+#define PROPERTY_H
+
+#include <algorithm>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+#include "nextpnr_assertions.h"
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+struct Property
+{
+ enum State : char
+ {
+ S0 = '0',
+ S1 = '1',
+ Sx = 'x',
+ Sz = 'z'
+ };
+
+ Property();
+ Property(int64_t intval, int width = 32);
+ Property(const std::string &strval);
+ Property(State bit);
+ Property &operator=(const Property &other) = default;
+
+ bool is_string;
+
+ // The string literal (for string values), or a string of [01xz] (for numeric values)
+ std::string str;
+ // The lower 64 bits (for numeric values), unused for string values
+ int64_t intval;
+
+ void update_intval()
+ {
+ intval = 0;
+ for (int i = 0; i < int(str.size()); i++) {
+ NPNR_ASSERT(str[i] == S0 || str[i] == S1 || str[i] == Sx || str[i] == Sz);
+ if ((str[i] == S1) && i < 64)
+ intval |= (1ULL << i);
+ }
+ }
+
+ int64_t as_int64() const
+ {
+ NPNR_ASSERT(!is_string);
+ return intval;
+ }
+ std::vector<bool> as_bits() const
+ {
+ std::vector<bool> result;
+ result.reserve(str.size());
+ NPNR_ASSERT(!is_string);
+ for (auto c : str)
+ result.push_back(c == S1);
+ return result;
+ }
+ const std::string &as_string() const
+ {
+ NPNR_ASSERT(is_string);
+ return str;
+ }
+ const char *c_str() const
+ {
+ NPNR_ASSERT(is_string);
+ return str.c_str();
+ }
+ size_t size() const { return is_string ? 8 * str.size() : str.size(); }
+ double as_double() const
+ {
+ NPNR_ASSERT(is_string);
+ return std::stod(str);
+ }
+ bool as_bool() const
+ {
+ if (int(str.size()) <= 64)
+ return intval != 0;
+ else
+ return std::any_of(str.begin(), str.end(), [](char c) { return c == S1; });
+ }
+ bool is_fully_def() const
+ {
+ return !is_string && std::all_of(str.begin(), str.end(), [](char c) { return c == S0 || c == S1; });
+ }
+ Property extract(int offset, int len, State padding = State::S0) const
+ {
+ Property ret;
+ ret.is_string = false;
+ ret.str.reserve(len);
+ for (int i = offset; i < offset + len; i++)
+ ret.str.push_back(i < int(str.size()) ? str[i] : char(padding));
+ ret.update_intval();
+ return ret;
+ }
+ // Convert to a string representation, escaping literal strings matching /^[01xz]* *$/ by adding a space at the end,
+ // to disambiguate from binary strings
+ std::string to_string() const;
+ // Convert a string of four-value binary [01xz], or a literal string escaped according to the above rule
+ // to a Property
+ static Property from_string(const std::string &s);
+};
+
+inline bool operator==(const Property &a, const Property &b) { return a.is_string == b.is_string && a.str == b.str; }
+inline bool operator!=(const Property &a, const Property &b) { return a.is_string != b.is_string || a.str != b.str; }
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* PROPERTY_H */
diff --git a/common/kernel/pybindings.cc b/common/kernel/pybindings.cc
new file mode 100644
index 00000000..9a783eb4
--- /dev/null
+++ b/common/kernel/pybindings.cc
@@ -0,0 +1,362 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 gatecat <gatecat@ds0.me>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef NO_PYTHON
+
+#include "pybindings.h"
+#include "arch_pybindings.h"
+#include "json_frontend.h"
+#include "log.h"
+#include "nextpnr.h"
+
+#include <fstream>
+#include <memory>
+#include <signal.h>
+NEXTPNR_NAMESPACE_BEGIN
+
+// Required to determine concatenated module name (which differs for different
+// archs)
+#define PASTER(x, y) x##_##y
+#define EVALUATOR(x, y) PASTER(x, y)
+#define MODULE_NAME EVALUATOR(nextpnrpy, ARCHNAME)
+#define PYINIT_MODULE_NAME EVALUATOR(&PyInit_nextpnrpy, ARCHNAME)
+#define STRINGIFY(x) #x
+#define TOSTRING(x) STRINGIFY(x)
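+
+// For example, if ARCHNAME were `ice40`, MODULE_NAME would expand to
+// nextpnrpy_ice40 and TOSTRING(MODULE_NAME) to the string "nextpnrpy_ice40".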
+
+// Architecture-specific bindings should be created in the function below, which
+// must be implemented by every architecture
+void arch_wrap_python(py::module &m);
+
+bool operator==(const PortRef &a, const PortRef &b) { return (a.cell == b.cell) && (a.port == b.port); }
+
+// Load a JSON file into a design
+void parse_json_shim(std::string filename, Context &d)
+{
+ std::ifstream inf(filename);
+ if (!inf)
+ throw std::runtime_error("failed to open file " + filename);
+ parse_json(inf, filename, &d);
+}
+
+// Create a new Chip and load design from json file
+Context *load_design_shim(std::string filename, ArchArgs args)
+{
+ Context *d = new Context(args);
+ parse_json_shim(filename, *d);
+ return d;
+}
+
+namespace PythonConversion {
+template <> struct string_converter<PortRef &>
+{
+ inline PortRef from_str(Context *ctx, std::string name) { NPNR_ASSERT_FALSE("PortRef from_str not implemented"); }
+
+ inline std::string to_str(Context *ctx, const PortRef &pr)
+ {
+ return pr.cell->name.str(ctx) + "." + pr.port.str(ctx);
+ }
+};
+
+template <> struct string_converter<Property>
+{
+ inline Property from_str(Context *ctx, std::string s) { return Property::from_string(s); }
+
+ inline std::string to_str(Context *ctx, Property p) { return p.to_string(); }
+};
+
+} // namespace PythonConversion
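+
+// A sketch of what a converter for a further type would look like (BelId is
+// used purely for illustration; the real per-arch converters live in
+// arch_pybindings.h and may differ):
+//
+//   template <> struct string_converter<BelId>
+//   {
+//       BelId from_str(Context *ctx, std::string name) { return ctx->getBelByNameStr(name); }
+//       std::string to_str(Context *ctx, BelId bel) { return ctx->nameOfBel(bel); }
+//   };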
+
+std::string loc_repr_py(Loc loc) { return stringf("Loc(%d, %d, %d)", loc.x, loc.y, loc.z); }
+
+PYBIND11_EMBEDDED_MODULE(MODULE_NAME, m)
+{
+ py::register_exception_translator([](std::exception_ptr p) {
+ try {
+ if (p)
+ std::rethrow_exception(p);
+ } catch (const assertion_failure &e) {
+ PyErr_SetString(PyExc_AssertionError, e.what());
+ }
+ });
+
+ using namespace PythonConversion;
+
+ py::enum_<GraphicElement::type_t>(m, "GraphicElementType")
+ .value("TYPE_NONE", GraphicElement::TYPE_NONE)
+ .value("TYPE_LINE", GraphicElement::TYPE_LINE)
+ .value("TYPE_ARROW", GraphicElement::TYPE_ARROW)
+ .value("TYPE_BOX", GraphicElement::TYPE_BOX)
+ .value("TYPE_CIRCLE", GraphicElement::TYPE_CIRCLE)
+ .value("TYPE_LABEL", GraphicElement::TYPE_LABEL)
+ .export_values();
+
+ py::enum_<GraphicElement::style_t>(m, "GraphicElementStyle")
+ .value("STYLE_GRID", GraphicElement::STYLE_GRID)
+ .value("STYLE_FRAME", GraphicElement::STYLE_FRAME)
+ .value("STYLE_HIDDEN", GraphicElement::STYLE_HIDDEN)
+ .value("STYLE_INACTIVE", GraphicElement::STYLE_INACTIVE)
+ .value("STYLE_ACTIVE", GraphicElement::STYLE_ACTIVE)
+ .export_values();
+
+ py::class_<GraphicElement>(m, "GraphicElement")
+ .def(py::init<GraphicElement::type_t, GraphicElement::style_t, float, float, float, float, float>(),
+ py::arg("type"), py::arg("style"), py::arg("x1"), py::arg("y1"), py::arg("x2"), py::arg("y2"),
+ py::arg("z"))
+ .def_readwrite("type", &GraphicElement::type)
+ .def_readwrite("x1", &GraphicElement::x1)
+ .def_readwrite("y1", &GraphicElement::y1)
+ .def_readwrite("x2", &GraphicElement::x2)
+ .def_readwrite("y2", &GraphicElement::y2)
+ .def_readwrite("text", &GraphicElement::text);
+
+ py::enum_<PortType>(m, "PortType")
+ .value("PORT_IN", PORT_IN)
+ .value("PORT_OUT", PORT_OUT)
+ .value("PORT_INOUT", PORT_INOUT)
+ .export_values();
+
+ py::enum_<PlaceStrength>(m, "PlaceStrength")
+ .value("STRENGTH_NONE", STRENGTH_NONE)
+ .value("STRENGTH_WEAK", STRENGTH_WEAK)
+ .value("STRENGTH_STRONG", STRENGTH_STRONG)
+ .value("STRENGTH_FIXED", STRENGTH_FIXED)
+ .value("STRENGTH_LOCKED", STRENGTH_LOCKED)
+ .value("STRENGTH_USER", STRENGTH_USER)
+ .export_values();
+
+ py::class_<DelayPair>(m, "DelayPair")
+ .def(py::init<>())
+ .def(py::init<delay_t>())
+ .def(py::init<delay_t, delay_t>())
+ .def_readwrite("min_delay", &DelayPair::min_delay)
+ .def_readwrite("max_delay", &DelayPair::max_delay)
+ .def("minDelay", &DelayPair::minDelay)
+ .def("maxDelay", &DelayPair::maxDelay);
+
+ py::class_<DelayQuad>(m, "DelayQuad")
+ .def(py::init<>())
+ .def(py::init<delay_t>())
+ .def(py::init<delay_t, delay_t>())
+ .def(py::init<delay_t, delay_t, delay_t, delay_t>())
+ .def(py::init<DelayPair, DelayPair>())
+ .def_readwrite("rise", &DelayQuad::rise)
+ .def_readwrite("fall", &DelayQuad::fall)
+ .def("minDelay", &DelayQuad::minDelay)
+ .def("minRiseDelay", &DelayQuad::minRiseDelay)
+ .def("minFallDelay", &DelayQuad::minFallDelay)
+ .def("maxDelay", &DelayQuad::maxDelay)
+ .def("maxRiseDelay", &DelayQuad::maxRiseDelay)
+ .def("maxFallDelay", &DelayQuad::maxFallDelay)
+ .def("delayPair", &DelayQuad::delayPair);
+
+ typedef dict<IdString, Property> AttrMap;
+ typedef dict<IdString, PortInfo> PortMap;
+ typedef dict<IdString, IdString> IdIdMap;
+ typedef dict<IdString, std::unique_ptr<Region>> RegionMap;
+
+ py::class_<BaseCtx>(m, "BaseCtx");
+
+ auto loc_cls = py::class_<Loc>(m, "Loc")
+ .def(py::init<int, int, int>())
+ .def_readwrite("x", &Loc::x)
+ .def_readwrite("y", &Loc::y)
+ .def_readwrite("z", &Loc::z)
+ .def("__repr__", loc_repr_py);
+
+ auto ci_cls = py::class_<ContextualWrapper<CellInfo &>>(m, "CellInfo");
+ readwrite_wrapper<CellInfo &, decltype(&CellInfo::name), &CellInfo::name, conv_to_str<IdString>,
+ conv_from_str<IdString>>::def_wrap(ci_cls, "name");
+ readwrite_wrapper<CellInfo &, decltype(&CellInfo::type), &CellInfo::type, conv_to_str<IdString>,
+ conv_from_str<IdString>>::def_wrap(ci_cls, "type");
+ readonly_wrapper<CellInfo &, decltype(&CellInfo::attrs), &CellInfo::attrs, wrap_context<AttrMap &>>::def_wrap(
+ ci_cls, "attrs");
+ readonly_wrapper<CellInfo &, decltype(&CellInfo::params), &CellInfo::params, wrap_context<AttrMap &>>::def_wrap(
+ ci_cls, "params");
+ readonly_wrapper<CellInfo &, decltype(&CellInfo::ports), &CellInfo::ports, wrap_context<PortMap &>>::def_wrap(
+ ci_cls, "ports");
+ readwrite_wrapper<CellInfo &, decltype(&CellInfo::bel), &CellInfo::bel, conv_to_str<BelId>,
+ conv_from_str<BelId>>::def_wrap(ci_cls, "bel");
+ readwrite_wrapper<CellInfo &, decltype(&CellInfo::belStrength), &CellInfo::belStrength, pass_through<PlaceStrength>,
+ pass_through<PlaceStrength>>::def_wrap(ci_cls, "belStrength");
+
+ fn_wrapper_1a_v<CellInfo &, decltype(&CellInfo::addInput), &CellInfo::addInput, conv_from_str<IdString>>::def_wrap(
+ ci_cls, "addInput");
+ fn_wrapper_1a_v<CellInfo &, decltype(&CellInfo::addOutput), &CellInfo::addOutput,
+ conv_from_str<IdString>>::def_wrap(ci_cls, "addOutput");
+ fn_wrapper_1a_v<CellInfo &, decltype(&CellInfo::addInout), &CellInfo::addInout, conv_from_str<IdString>>::def_wrap(
+ ci_cls, "addInout");
+
+ fn_wrapper_2a_v<CellInfo &, decltype(&CellInfo::setParam), &CellInfo::setParam, conv_from_str<IdString>,
+ conv_from_str<Property>>::def_wrap(ci_cls, "setParam");
+ fn_wrapper_1a_v<CellInfo &, decltype(&CellInfo::unsetParam), &CellInfo::unsetParam,
+ conv_from_str<IdString>>::def_wrap(ci_cls, "unsetParam");
+ fn_wrapper_2a_v<CellInfo &, decltype(&CellInfo::setAttr), &CellInfo::setAttr, conv_from_str<IdString>,
+ conv_from_str<Property>>::def_wrap(ci_cls, "setAttr");
+ fn_wrapper_1a_v<CellInfo &, decltype(&CellInfo::unsetAttr), &CellInfo::unsetAttr,
+ conv_from_str<IdString>>::def_wrap(ci_cls, "unsetAttr");
+
+ auto pi_cls = py::class_<ContextualWrapper<PortInfo &>>(m, "PortInfo");
+ readwrite_wrapper<PortInfo &, decltype(&PortInfo::name), &PortInfo::name, conv_to_str<IdString>,
+ conv_from_str<IdString>>::def_wrap(pi_cls, "name");
+ readonly_wrapper<PortInfo &, decltype(&PortInfo::net), &PortInfo::net, deref_and_wrap<NetInfo>>::def_wrap(pi_cls,
+ "net");
+ readwrite_wrapper<PortInfo &, decltype(&PortInfo::type), &PortInfo::type, pass_through<PortType>,
+ pass_through<PortType>>::def_wrap(pi_cls, "type");
+
+ typedef indexed_store<PortRef> PortRefVector;
+ typedef dict<WireId, PipMap> WireMap;
+ typedef pool<BelId> BelSet;
+ typedef pool<WireId> WireSet;
+
+ auto ni_cls = py::class_<ContextualWrapper<NetInfo &>>(m, "NetInfo");
+ readwrite_wrapper<NetInfo &, decltype(&NetInfo::name), &NetInfo::name, conv_to_str<IdString>,
+ conv_from_str<IdString>>::def_wrap(ni_cls, "name");
+ readonly_wrapper<NetInfo &, decltype(&NetInfo::driver), &NetInfo::driver, wrap_context<PortRef>>::def_wrap(
+ ni_cls, "driver");
+ readonly_wrapper<NetInfo &, decltype(&NetInfo::users), &NetInfo::users, wrap_context<PortRefVector &>>::def_wrap(
+ ni_cls, "users");
+ readonly_wrapper<NetInfo &, decltype(&NetInfo::wires), &NetInfo::wires, wrap_context<WireMap &>>::def_wrap(ni_cls,
+ "wires");
+
+ auto pr_cls = py::class_<ContextualWrapper<PortRef>>(m, "PortRef");
+ readonly_wrapper<PortRef, decltype(&PortRef::cell), &PortRef::cell, deref_and_wrap<CellInfo>>::def_wrap(pr_cls,
+ "cell");
+ readonly_wrapper<PortRef, decltype(&PortRef::port), &PortRef::port, conv_to_str<IdString>>::def_wrap(pr_cls,
+ "port");
+ readonly_wrapper<PortRef, decltype(&PortRef::budget), &PortRef::budget, pass_through<delay_t>>::def_wrap(pr_cls,
+ "budget");
+
+ auto pm_cls = py::class_<ContextualWrapper<PipMap &>>(m, "PipMap");
+ readwrite_wrapper<PipMap &, decltype(&PipMap::pip), &PipMap::pip, conv_to_str<PipId>,
+ conv_from_str<PipId>>::def_wrap(pm_cls, "pip");
+ readwrite_wrapper<PipMap &, decltype(&PipMap::strength), &PipMap::strength, pass_through<PlaceStrength>,
+ pass_through<PlaceStrength>>::def_wrap(pm_cls, "strength");
+
+ m.def("parse_json", parse_json_shim);
+ m.def("load_design", load_design_shim, py::return_value_policy::take_ownership);
+
+ auto region_cls = py::class_<ContextualWrapper<Region &>>(m, "Region");
+ readwrite_wrapper<Region &, decltype(&Region::name), &Region::name, conv_to_str<IdString>,
+ conv_from_str<IdString>>::def_wrap(region_cls, "name");
+ readwrite_wrapper<Region &, decltype(&Region::constr_bels), &Region::constr_bels, pass_through<bool>,
+ pass_through<bool>>::def_wrap(region_cls, "constr_bels");
+ readwrite_wrapper<Region &, decltype(&Region::constr_wires), &Region::constr_wires, pass_through<bool>,
+ pass_through<bool>>::def_wrap(region_cls, "constr_wires");
+ readwrite_wrapper<Region &, decltype(&Region::constr_pips), &Region::constr_pips, pass_through<bool>,
+ pass_through<bool>>::def_wrap(region_cls, "constr_pips");
+ readonly_wrapper<Region &, decltype(&Region::bels), &Region::bels, wrap_context<BelSet &>>::def_wrap(region_cls,
+ "bels");
+ readonly_wrapper<Region &, decltype(&Region::wires), &Region::wires, wrap_context<WireSet &>>::def_wrap(region_cls,
+ "wires");
+
+ auto hierarchy_cls = py::class_<ContextualWrapper<HierarchicalCell &>>(m, "HierarchicalCell");
+ readwrite_wrapper<HierarchicalCell &, decltype(&HierarchicalCell::name), &HierarchicalCell::name,
+ conv_to_str<IdString>, conv_from_str<IdString>>::def_wrap(hierarchy_cls, "name");
+ readwrite_wrapper<HierarchicalCell &, decltype(&HierarchicalCell::type), &HierarchicalCell::type,
+ conv_to_str<IdString>, conv_from_str<IdString>>::def_wrap(hierarchy_cls, "type");
+ readwrite_wrapper<HierarchicalCell &, decltype(&HierarchicalCell::parent), &HierarchicalCell::parent,
+ conv_to_str<IdString>, conv_from_str<IdString>>::def_wrap(hierarchy_cls, "parent");
+ readwrite_wrapper<HierarchicalCell &, decltype(&HierarchicalCell::fullpath), &HierarchicalCell::fullpath,
+ conv_to_str<IdString>, conv_from_str<IdString>>::def_wrap(hierarchy_cls, "fullpath");
+
+ readonly_wrapper<HierarchicalCell &, decltype(&HierarchicalCell::leaf_cells), &HierarchicalCell::leaf_cells,
+ wrap_context<IdIdMap &>>::def_wrap(hierarchy_cls, "leaf_cells");
+ readonly_wrapper<HierarchicalCell &, decltype(&HierarchicalCell::nets), &HierarchicalCell::nets,
+ wrap_context<IdIdMap &>>::def_wrap(hierarchy_cls, "nets");
+ readonly_wrapper<HierarchicalCell &, decltype(&HierarchicalCell::hier_cells), &HierarchicalCell::hier_cells,
+ wrap_context<IdIdMap &>>::def_wrap(hierarchy_cls, "hier_cells");
+ WRAP_MAP(m, AttrMap, conv_to_str<Property>, "AttrMap");
+ WRAP_MAP(m, PortMap, wrap_context<PortInfo &>, "PortMap");
+ WRAP_MAP(m, IdIdMap, conv_to_str<IdString>, "IdIdMap");
+ WRAP_MAP(m, WireMap, wrap_context<PipMap &>, "WireMap");
+ WRAP_MAP_UPTR(m, RegionMap, "RegionMap");
+
+ WRAP_INDEXSTORE(m, PortRefVector, wrap_context<PortRef>);
+
+ typedef dict<IdString, ClockFmax> ClockFmaxMap;
+ WRAP_MAP(m, ClockFmaxMap, pass_through<ClockFmax>, "ClockFmaxMap");
+
+ auto clk_fmax_cls = py::class_<ClockFmax>(m, "ClockFmax")
+ .def_readonly("achieved", &ClockFmax::achieved)
+ .def_readonly("constraint", &ClockFmax::constraint);
+
+ auto tmg_result_cls = py::class_<ContextualWrapper<TimingResult &>>(m, "TimingResult");
+ readonly_wrapper<TimingResult &, decltype(&TimingResult::clock_fmax), &TimingResult::clock_fmax,
+ wrap_context<ClockFmaxMap &>>::def_wrap(tmg_result_cls, "clock_fmax");
+ arch_wrap_python(m);
+}
+
+#ifdef MAIN_EXECUTABLE
+static wchar_t *program;
+#endif
+
+void (*python_sighandler)(int) = nullptr;
+
+void init_python(const char *executable)
+{
+#ifdef MAIN_EXECUTABLE
+ program = Py_DecodeLocale(executable, NULL);
+ if (program == NULL) {
+ fprintf(stderr, "Fatal error: cannot decode executable filename\n");
+ exit(1);
+ }
+ Py_SetProgramName(program);
+ py::initialize_interpreter();
+ py::module::import(TOSTRING(MODULE_NAME));
+ PyRun_SimpleString("from " TOSTRING(MODULE_NAME) " import *");
+ python_sighandler = signal(SIGINT, SIG_DFL);
+#endif
+}
+
+void deinit_python()
+{
+#ifdef MAIN_EXECUTABLE
+ py::finalize_interpreter();
+ PyMem_RawFree(program);
+#endif
+}
+
+void execute_python_file(const char *python_file)
+{
+ try {
+ FILE *fp = fopen(python_file, "r");
+ if (fp == NULL) {
+ fprintf(stderr, "Fatal error: file not found %s\n", python_file);
+ exit(1);
+ }
+ if (python_sighandler)
+ signal(SIGINT, python_sighandler);
+ int result = PyRun_SimpleFile(fp, python_file);
+ signal(SIGINT, SIG_DFL);
+ fclose(fp);
+ if (result == -1) {
+ log_error("Error occurred while executing Python script %s\n", python_file);
+ }
+ } catch (py::error_already_set const &) {
+ // Parse and output the exception
+ std::string perror_str = parse_python_exception();
+ signal(SIGINT, SIG_DFL);
+ log_error("Error in Python: %s\n", perror_str.c_str());
+ }
+}
+
+NEXTPNR_NAMESPACE_END
+
+#endif // NO_PYTHON
diff --git a/common/kernel/pybindings.h b/common/kernel/pybindings.h
new file mode 100644
index 00000000..695441f3
--- /dev/null
+++ b/common/kernel/pybindings.h
@@ -0,0 +1,93 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 gatecat <gatecat@ds0.me>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef COMMON_PYBINDINGS_H
+#define COMMON_PYBINDINGS_H
+
+#include <Python.h>
+#include <iostream>
+#include <pybind11/embed.h>
+#include <pybind11/pybind11.h>
+#include <stdexcept>
+#include <utility>
+#include "pycontainers.h"
+#include "pywrappers.h"
+
+#include "nextpnr.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+namespace py = pybind11;
+
+std::string parse_python_exception();
+
+template <typename Tn> void python_export_global(const char *name, Tn &x)
+{
+ try {
+ py::object obj = py::cast(x, py::return_value_policy::reference);
+ py::module::import("__main__").attr(name) = obj.ptr();
+ } catch (pybind11::error_already_set &) {
+ // Parse and output the exception
+ std::string perror_str = parse_python_exception();
+ std::cout << "Error in Python: " << perror_str << std::endl;
+ std::terminate();
+ }
+};
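+
+// Typical use, sketched (the name "ctx" is just the chosen Python-side
+// identifier; error handling omitted):
+//
+//   init_python(argv[0]);
+//   python_export_global("ctx", *ctx); // make the Context visible to scripts as `ctx`
+//   execute_python_file("script.py");
+//   deinit_python();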
+
+void init_python(const char *executable);
+
+void deinit_python();
+
+void execute_python_file(const char *python_file);
+
+// Default IdString conversions
+namespace PythonConversion {
+
+template <> struct string_converter<IdString>
+{
+ inline IdString from_str(Context *ctx, std::string name) { return ctx->id(name); }
+
+ inline std::string to_str(Context *ctx, IdString id) { return id.str(ctx); }
+};
+
+template <> struct string_converter<const IdString>
+{
+ inline IdString from_str(Context *ctx, std::string name) { return ctx->id(name); }
+
+ inline std::string to_str(Context *ctx, IdString id) { return id.str(ctx); }
+};
+
+template <> struct string_converter<IdStringList>
+{
+ IdStringList from_str(Context *ctx, std::string name) { return IdStringList::parse(ctx, name); }
+ std::string to_str(Context *ctx, const IdStringList &id) { return id.str(ctx); }
+};
+
+template <> struct string_converter<const IdStringList>
+{
+ IdStringList from_str(Context *ctx, std::string name) { return IdStringList::parse(ctx, name); }
+ std::string to_str(Context *ctx, const IdStringList &id) { return id.str(ctx); }
+};
+
+} // namespace PythonConversion
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* end of include guard: COMMON_PYBINDINGS_H */
diff --git a/common/kernel/pycontainers.h b/common/kernel/pycontainers.h
new file mode 100644
index 00000000..ff49c34c
--- /dev/null
+++ b/common/kernel/pycontainers.h
@@ -0,0 +1,575 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 gatecat <gatecat@ds0.me>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef COMMON_PYCONTAINERS_H
+#define COMMON_PYCONTAINERS_H
+
+#include <pybind11/pybind11.h>
+#include <sstream>
+#include <stdexcept>
+#include <type_traits>
+#include <utility>
+#include "nextpnr.h"
+#include "pywrappers.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+namespace py = pybind11;
+
+inline void KeyError()
+{
+ PyErr_SetString(PyExc_KeyError, "Key not found");
+ throw py::error_already_set();
+}
+
+/*
+A wrapper for a Pythonised nextpnr Iterator. The actual class wrapped is a
+pair<Iterator, Iterator> containing (current, end), wrapped in a ContextualWrapper.
+*/
+
+template <typename T, py::return_value_policy P, typename value_conv = PythonConversion::pass_through<T>>
+struct iterator_wrapper
+{
+ typedef decltype(*(std::declval<T>())) value_t;
+
+ typedef PythonConversion::ContextualWrapper<std::pair<T, T>> wrapped_iter_t;
+ using return_t = typename value_conv::ret_type;
+
+ static return_t next(wrapped_iter_t &iter)
+ {
+ if (iter.base.first != iter.base.second) {
+ return_t val = value_conv()(iter.ctx, *iter.base.first);
+ ++iter.base.first;
+ return val;
+ } else {
+ PyErr_SetString(PyExc_StopIteration, "End of range reached");
+ throw py::error_already_set();
+ }
+ }
+
+ static void wrap(py::module &m, const char *python_name)
+ {
+ py::class_<wrapped_iter_t>(m, python_name).def("__next__", next, P);
+ }
+};
+
+/*
+A pair that doesn't automatically become a tuple
+*/
+template <typename Ta, typename Tb> struct iter_pair
+{
+ iter_pair(){};
+ iter_pair(const Ta &first, const Tb &second) : first(first), second(second){};
+ Ta first;
+ Tb second;
+};
+
+/*
+A wrapper for a nextpnr Range. Ranges should have two functions, begin()
+and end(), which return iterator-like objects supporting ++, * and !=.
+Full STL iterator semantics are not required, unlike the standard Boost wrappers.
+*/
+
+template <typename T, py::return_value_policy P = py::return_value_policy::copy,
+ typename value_conv = PythonConversion::pass_through<T>>
+struct range_wrapper
+{
+ typedef decltype(std::declval<T>().begin()) iterator_t;
+ typedef decltype(*(std::declval<iterator_t>())) value_t;
+ typedef typename PythonConversion::ContextualWrapper<T> wrapped_range;
+ typedef typename PythonConversion::ContextualWrapper<std::pair<iterator_t, iterator_t>> wrapped_pair;
+ static wrapped_pair iter(wrapped_range &range)
+ {
+ return wrapped_pair(range.ctx, std::make_pair(range.base.begin(), range.base.end()));
+ }
+
+ static std::string repr(wrapped_range &range)
+ {
+ PythonConversion::string_converter<value_t> conv;
+ bool first = true;
+ std::stringstream ss;
+ ss << "[";
+ for (const auto &item : range.base) {
+ if (!first)
+ ss << ", ";
+ ss << "'" << conv.to_str(range.ctx, item) << "'";
+ first = false;
+ }
+ ss << "]";
+ return ss.str();
+ }
+
+ static void wrap(py::module &m, const char *range_name, const char *iter_name)
+ {
+ py::class_<wrapped_range>(m, range_name).def("__iter__", iter).def("__repr__", repr);
+ iterator_wrapper<iterator_t, P, value_conv>().wrap(m, iter_name);
+ }
+
+ typedef iterator_wrapper<iterator_t, P, value_conv> iter_wrap;
+};
+
+#define WRAP_RANGE(m, t, conv) \
+ range_wrapper<t##Range, py::return_value_policy::copy, conv>().wrap(m, #t "Range", #t "Iterator")
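+
+// A sketch of the shape a wrappable range needs (hypothetical type, for
+// illustration only; no full STL iterator machinery is required):
+//
+//   struct ExampleRange
+//   {
+//       SomeIter begin() const; // SomeIter only needs operator++, operator* and operator!=
+//       SomeIter end() const;
+//   };
+//   // range_wrapper<ExampleRange>().wrap(m, "ExampleRange", "ExampleIterator");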
+
+/*
+A wrapper for a vector or similar structure, with support for value conversion.
+*/
+
+template <typename T, py::return_value_policy P = py::return_value_policy::copy,
+ typename value_conv = PythonConversion::pass_through<T>>
+struct vector_wrapper
+{
+ typedef decltype(std::declval<T>().begin()) iterator_t;
+ typedef decltype(*(std::declval<iterator_t>())) value_t;
+ typedef typename PythonConversion::ContextualWrapper<T &> wrapped_vector;
+ typedef typename PythonConversion::ContextualWrapper<std::pair<iterator_t, iterator_t>> wrapped_pair;
+ using return_t = typename value_conv::ret_type;
+ static wrapped_pair iter(wrapped_vector &range)
+ {
+ return wrapped_pair(range.ctx, std::make_pair(range.base.begin(), range.base.end()));
+ }
+
+ static std::string repr(wrapped_vector &range)
+ {
+ PythonConversion::string_converter<value_t> conv;
+ bool first = true;
+ std::stringstream ss;
+ ss << "[";
+ for (const auto &item : range.base) {
+ if (!first)
+ ss << ", ";
+ ss << "'" << conv.to_str(range.ctx, item) << "'";
+ first = false;
+ }
+ ss << "]";
+ return ss.str();
+ }
+
+ static int len(wrapped_vector &range) { return range.base.size(); }
+
+ static return_t getitem(wrapped_vector &range, int i)
+ {
+ return value_conv()(range.ctx, boost::ref(range.base.at(i)));
+ }
+
+ static void wrap(py::module &m, const char *range_name, const char *iter_name)
+ {
+ py::class_<wrapped_vector>(m, range_name)
+ .def("__iter__", iter)
+ .def("__repr__", repr)
+ .def("__len__", len)
+ .def("__getitem__", getitem);
+
+ iterator_wrapper<iterator_t, P, value_conv>().wrap(m, iter_name);
+ }
+
+ typedef iterator_wrapper<iterator_t, P, value_conv> iter_wrap;
+};
+
+#define WRAP_VECTOR(m, t, conv) vector_wrapper<t, py::return_value_policy::copy, conv>().wrap(m, #t, #t "Iterator")
+
+template <typename T, py::return_value_policy P = py::return_value_policy::copy,
+ typename value_conv = PythonConversion::pass_through<T>>
+struct indexed_store_wrapper
+{
+ typedef decltype(std::declval<T>().begin()) iterator_t;
+ typedef decltype(*(std::declval<iterator_t>())) value_t;
+ typedef typename PythonConversion::ContextualWrapper<T &> wrapped_vector;
+ typedef typename PythonConversion::ContextualWrapper<std::pair<iterator_t, iterator_t>> wrapped_pair;
+ using return_t = typename value_conv::ret_type;
+ static wrapped_pair iter(wrapped_vector &range)
+ {
+ return wrapped_pair(range.ctx, std::make_pair(range.base.begin(), range.base.end()));
+ }
+
+ static std::string repr(wrapped_vector &range)
+ {
+ PythonConversion::string_converter<value_t> conv;
+ bool first = true;
+ std::stringstream ss;
+ ss << "[";
+ for (const auto &item : range.base) {
+ if (!first)
+ ss << ", ";
+ ss << "'" << conv.to_str(range.ctx, item) << "'";
+ first = false;
+ }
+ ss << "]";
+ return ss.str();
+ }
+
+ static int len(wrapped_vector &range) { return range.base.capacity(); }
+
+ static py::object getitem(wrapped_vector &range, int i)
+ {
+ store_index<std::remove_reference_t<value_t>> idx(i);
+ if (!range.base.count(idx))
+ KeyError();
+ return py::cast(value_conv()(range.ctx, boost::ref(range.base.at(idx))));
+ }
+
+ static void wrap(py::module &m, const char *range_name, const char *iter_name)
+ {
+ py::class_<wrapped_vector>(m, range_name)
+ .def("__iter__", iter)
+ .def("__repr__", repr)
+ .def("__len__", len)
+ .def("__getitem__", getitem);
+
+ iterator_wrapper<iterator_t, P, value_conv>().wrap(m, iter_name);
+ }
+
+ typedef iterator_wrapper<iterator_t, P, value_conv> iter_wrap;
+};
+
+#define WRAP_INDEXSTORE(m, t, conv) \
+ indexed_store_wrapper<t, py::return_value_policy::copy, conv>().wrap(m, #t, #t "Iterator")
+
+/*
+Wrapper for a pair that allows access either via C++-style members (.first and
+.second) or as a Python iterable and indexable object.
+*/
+template <typename T1, typename T2> struct pair_wrapper
+{
+ typedef std::pair<T1, T2> T;
+
+ struct pair_iterator_wrapper
+ {
+ static py::object next(iter_pair<T &, int> &iter)
+ {
+ if (iter.second == 0) {
+ iter.second++;
+ return py::cast(iter.first.first);
+ } else if (iter.second == 1) {
+ iter.second++;
+ return py::cast(iter.first.second);
+ } else {
+ PyErr_SetString(PyExc_StopIteration, "End of range reached");
+ throw py::error_already_set();
+ }
+ }
+
+ static void wrap(py::module &m, const char *python_name)
+ {
+ py::class_<iter_pair<T &, int>>(m, python_name).def("__next__", next);
+ }
+ };
+
+ static py::object get(T &x, int i)
+ {
+ if ((i >= 2) || (i < 0))
+ KeyError();
+ return (i == 1) ? py::object(x.second) : py::object(x.first);
+ }
+
+ static void set(T &x, int i, py::object val)
+ {
+ if ((i >= 2) || (i < 0))
+ KeyError();
+ if (i == 0)
+ x.first = val.cast<T1>();
+ if (i == 1)
+ x.second = val.cast<T2>();
+ }
+
+ static int len(T &x) { return 2; }
+
+ static iter_pair<T &, int> iter(T &x) { return iter_pair<T &, int>(boost::ref(x), 0); };
+
+ static void wrap(py::module &m, const char *pair_name, const char *iter_name)
+ {
+ pair_iterator_wrapper::wrap(m, iter_name);
+ py::class_<T>(m, pair_name)
+ .def("__iter__", iter)
+ .def("__len__", len)
+ .def("__getitem__", get)
+ .def("__setitem__", set, py::keep_alive<1, 2>())
+ .def_readwrite("first", &T::first)
+ .def_readwrite("second", &T::second);
+ }
+};
+
+/*
+Special case of above for map key/values
+ */
+template <typename T1, typename T2, typename value_conv> struct map_pair_wrapper
+{
+ typedef std::pair<T1, T2> T;
+ typedef PythonConversion::ContextualWrapper<T &> wrapped_pair;
+ typedef typename T::second_type V;
+
+ struct pair_iterator_wrapper
+ {
+ static py::object next(iter_pair<wrapped_pair &, int> &iter)
+ {
+ if (iter.second == 0) {
+ iter.second++;
+ return py::cast(PythonConversion::string_converter<decltype(iter.first.base.first)>().to_str(
+ iter.first.ctx, iter.first.base.first));
+ } else if (iter.second == 1) {
+ iter.second++;
+ return py::cast(value_conv()(iter.first.ctx, iter.first.base.second));
+ } else {
+ PyErr_SetString(PyExc_StopIteration, "End of range reached");
+ throw py::error_already_set();
+ }
+ }
+
+ static void wrap(py::module &m, const char *python_name)
+ {
+ py::class_<iter_pair<wrapped_pair &, int>>(m, python_name).def("__next__", next);
+ }
+ };
+
+ static py::object get(wrapped_pair &x, int i)
+ {
+ if ((i >= 2) || (i < 0))
+ KeyError();
+ return (i == 1) ? py::cast(value_conv()(x.ctx, x.base.second))
+ : py::cast(PythonConversion::string_converter<decltype(x.base.first)>().to_str(x.ctx,
+ x.base.first));
+ }
+
+ static int len(wrapped_pair &x) { return 2; }
+
+ static iter_pair<wrapped_pair &, int> iter(wrapped_pair &x)
+ {
+ return iter_pair<wrapped_pair &, int>(boost::ref(x), 0);
+ };
+
+ static std::string first_getter(wrapped_pair &t)
+ {
+ return PythonConversion::string_converter<decltype(t.base.first)>().to_str(t.ctx, t.base.first);
+ }
+
+ static typename value_conv::ret_type second_getter(wrapped_pair &t) { return value_conv()(t.ctx, t.base.second); }
+
+ static void wrap(py::module &m, const char *pair_name, const char *iter_name)
+ {
+ pair_iterator_wrapper::wrap(m, iter_name);
+ py::class_<wrapped_pair>(m, pair_name)
+ .def("__iter__", iter)
+ .def("__len__", len)
+ .def("__getitem__", get)
+ .def_property_readonly("first", first_getter)
+ .def_property_readonly("second", second_getter);
+ }
+};
+
+/*
+Wrapper for a map, either an unordered_map, regular map or dict
+ */
+
+template <typename T, typename value_conv> struct map_wrapper
+{
+ typedef typename std::remove_cv<typename std::remove_reference<typename T::key_type>::type>::type K;
+ typedef typename T::mapped_type V;
+ typedef typename value_conv::ret_type wrapped_V;
+ typedef typename T::value_type KV;
+ typedef typename PythonConversion::ContextualWrapper<T &> wrapped_map;
+
+ static wrapped_V get(wrapped_map &x, std::string const &i)
+ {
+ K k = PythonConversion::string_converter<K>().from_str(x.ctx, i);
+ if (x.base.find(k) != x.base.end())
+ return value_conv()(x.ctx, x.base.at(k));
+ KeyError();
+
+ // Should be unreachable; throwing here prevents a 'control reaches end of non-void function' warning
+ throw std::runtime_error("unreachable");
+ }
+
+ static void set(wrapped_map &x, std::string const &i, V const &v)
+ {
+ x.base[PythonConversion::string_converter<K>().from_str(x.ctx, i)] = v;
+ }
+
+ static size_t len(wrapped_map &x) { return x.base.size(); }
+
+ static void del(wrapped_map &x, std::string const &i)
+ {
+ K k = PythonConversion::string_converter<K>().from_str(x.ctx, i);
+ if (x.base.find(k) != x.base.end())
+ x.base.erase(k);
+ else
+ KeyError();
+ }
+
+ static bool contains(wrapped_map &x, std::string const &i)
+ {
+ K k = PythonConversion::string_converter<K>().from_str(x.ctx, i);
+ return x.base.count(k);
+ }
+
+ static void wrap(py::module &m, const char *map_name, const char *kv_name, const char *kv_iter_name,
+ const char *iter_name)
+ {
+ map_pair_wrapper<typename KV::first_type, typename KV::second_type, value_conv>::wrap(m, kv_name, kv_iter_name);
+ typedef range_wrapper<T &, py::return_value_policy::copy, PythonConversion::wrap_context<KV &>> rw;
+ typename rw::iter_wrap().wrap(m, iter_name);
+ py::class_<wrapped_map>(m, map_name)
+ .def("__iter__", rw::iter)
+ .def("__len__", len)
+ .def("__contains__", contains)
+ .def("__getitem__", get)
+ .def("__setitem__", set, py::keep_alive<1, 2>());
+ }
+};
+
+/*
+Special case of above for map key/values where value is a unique_ptr
+ */
+template <typename T1, typename T2> struct map_pair_wrapper_uptr
+{
+ typedef std::pair<T1, T2> T;
+ typedef PythonConversion::ContextualWrapper<T &> wrapped_pair;
+ typedef typename T::second_type::element_type V;
+
+ struct pair_iterator_wrapper
+ {
+ static py::object next(iter_pair<wrapped_pair &, int> &iter)
+ {
+ if (iter.second == 0) {
+ iter.second++;
+ return py::cast(PythonConversion::string_converter<decltype(iter.first.base.first)>().to_str(
+ iter.first.ctx, iter.first.base.first));
+ } else if (iter.second == 1) {
+ iter.second++;
+ return py::cast(
+ PythonConversion::ContextualWrapper<V &>(iter.first.ctx, *iter.first.base.second.get()));
+ } else {
+ PyErr_SetString(PyExc_StopIteration, "End of range reached");
+ throw py::error_already_set();
+ }
+ }
+
+ static void wrap(py::module &m, const char *python_name)
+ {
+ py::class_<iter_pair<wrapped_pair &, int>>(m, python_name).def("__next__", next);
+ }
+ };
+
+ static py::object get(wrapped_pair &x, int i)
+ {
+ if ((i >= 2) || (i < 0))
+ KeyError();
+ return (i == 1) ? py::cast(PythonConversion::ContextualWrapper<V &>(x.ctx, *x.base.second.get()))
+ : py::cast(PythonConversion::string_converter<decltype(x.base.first)>().to_str(x.ctx,
+ x.base.first));
+ }
+
+ static int len(wrapped_pair &x) { return 2; }
+
+ static iter_pair<wrapped_pair &, int> iter(wrapped_pair &x)
+ {
+ return iter_pair<wrapped_pair &, int>(boost::ref(x), 0);
+ };
+
+ static std::string first_getter(wrapped_pair &t)
+ {
+ return PythonConversion::string_converter<decltype(t.base.first)>().to_str(t.ctx, t.base.first);
+ }
+
+ static PythonConversion::ContextualWrapper<V &> second_getter(wrapped_pair &t)
+ {
+ return PythonConversion::ContextualWrapper<V &>(t.ctx, *t.base.second.get());
+ }
+
+ static void wrap(py::module &m, const char *pair_name, const char *iter_name)
+ {
+ pair_iterator_wrapper::wrap(m, iter_name);
+ py::class_<wrapped_pair>(m, pair_name)
+ .def("__iter__", iter)
+ .def("__len__", len)
+ .def("__getitem__", get)
+ .def_property_readonly("first", first_getter)
+ .def_property_readonly("second", second_getter);
+ }
+};
+
+/*
+Wrapper for a map (unordered_map, regular map or dict) whose mapped type is a unique_ptr
+ */
+
+template <typename T> struct map_wrapper_uptr
+{
+ typedef typename std::remove_cv<typename std::remove_reference<typename T::key_type>::type>::type K;
+ typedef typename T::mapped_type::pointer V;
+ typedef typename T::mapped_type::element_type &Vr;
+ typedef typename T::value_type KV;
+ typedef typename PythonConversion::ContextualWrapper<T &> wrapped_map;
+
+ static PythonConversion::ContextualWrapper<Vr> get(wrapped_map &x, std::string const &i)
+ {
+ K k = PythonConversion::string_converter<K>().from_str(x.ctx, i);
+ if (x.base.find(k) != x.base.end())
+ return PythonConversion::ContextualWrapper<Vr>(x.ctx, *x.base.at(k).get());
+ KeyError();
+
+ // Should be unreachable; throwing here prevents a 'control reaches end of non-void function' warning
+ throw std::runtime_error("unreachable");
+ }
+
+ static void set(wrapped_map &x, std::string const &i, V const &v)
+ {
+ x.base[PythonConversion::string_converter<K>().from_str(x.ctx, i)] = typename T::mapped_type(v);
+ }
+
+ static size_t len(wrapped_map &x) { return x.base.size(); }
+
+ static void del(wrapped_map &x, std::string const &i)
+ {
+ K k = PythonConversion::string_converter<K>().from_str(x.ctx, i);
+ if (x.base.find(k) != x.base.end())
+ x.base.erase(k);
+ else
+ KeyError();
+ }
+
+ static bool contains(wrapped_map &x, std::string const &i)
+ {
+ K k = PythonConversion::string_converter<K>().from_str(x.ctx, i);
+ return x.base.count(k);
+ }
+
+ static void wrap(py::module &m, const char *map_name, const char *kv_name, const char *kv_iter_name,
+ const char *iter_name)
+ {
+ map_pair_wrapper_uptr<typename KV::first_type, typename KV::second_type>::wrap(m, kv_name, kv_iter_name);
+ typedef range_wrapper<T &, py::return_value_policy::copy, PythonConversion::wrap_context<KV &>> rw;
+ typename rw::iter_wrap().wrap(m, iter_name);
+ py::class_<wrapped_map>(m, map_name)
+ .def("__iter__", rw::iter)
+ .def("__len__", len)
+ .def("__contains__", contains)
+ .def("__getitem__", get)
+ .def("__setitem__", set, py::keep_alive<1, 2>());
+ }
+};
+
+#define WRAP_MAP(m, t, conv, name) \
+ map_wrapper<t, conv>().wrap(m, #name, #name "KeyValue", #name "KeyValueIter", #name "Iterator")
+#define WRAP_MAP_UPTR(m, t, name) \
+ map_wrapper_uptr<t>().wrap(m, #name, #name "KeyValue", #name "KeyValueIter", #name "Iterator")
+
+NEXTPNR_NAMESPACE_END
+
+#endif
diff --git a/common/kernel/pywrappers.h b/common/kernel/pywrappers.h
new file mode 100644
index 00000000..60ef65be
--- /dev/null
+++ b/common/kernel/pywrappers.h
@@ -0,0 +1,463 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 gatecat <gatecat@ds0.me>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef PYWRAPPERS_H
+#define PYWRAPPERS_H
+
+#include <pybind11/pybind11.h>
+#include <utility>
+#include "nextpnr.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+namespace py = pybind11;
+
+namespace PythonConversion {
+template <typename T> struct ContextualWrapper
+{
+ Context *ctx;
+ T base;
+
+ inline ContextualWrapper(Context *c, T x) : ctx(c), base(x){};
+
+ inline operator T() { return base; };
+ typedef T base_type;
+};
+
+template <typename T> struct WrapIfNotContext
+{
+ typedef ContextualWrapper<T> maybe_wrapped_t;
+};
+
+template <> struct WrapIfNotContext<Context>
+{
+ typedef Context maybe_wrapped_t;
+};
+
+template <typename T> inline Context *get_ctx(typename WrapIfNotContext<T>::maybe_wrapped_t &wrp_ctx)
+{
+ return wrp_ctx.ctx;
+}
+
+template <> inline Context *get_ctx<Context>(WrapIfNotContext<Context>::maybe_wrapped_t &unwrp_ctx)
+{
+ return &unwrp_ctx;
+}
+
+template <typename T> inline T &get_base(typename WrapIfNotContext<T>::maybe_wrapped_t &wrp_ctx)
+{
+ return wrp_ctx.base;
+}
+
+template <> inline Context &get_base<Context>(WrapIfNotContext<Context>::maybe_wrapped_t &unwrp_ctx)
+{
+ return unwrp_ctx;
+}
+
+template <typename T> ContextualWrapper<T> wrap_ctx(Context *ctx, T x) { return ContextualWrapper<T>(ctx, x); }
+
+// Dummy class, to be implemented by users
+template <typename T> struct string_converter;
+
+class bad_wrap
+{
+};
+
+// Action options
+template <typename T> struct pass_through
+{
+ inline T operator()(Context *ctx, T x) { return x; }
+
+ using ret_type = T;
+ using arg_type = T;
+};
+
+template <typename T> struct wrap_context
+{
+ inline ContextualWrapper<T> operator()(Context *ctx, T x) { return ContextualWrapper<T>(ctx, x); }
+
+ using arg_type = T;
+ using ret_type = ContextualWrapper<T>;
+};
+
+template <typename T> struct unwrap_context
+{
+ inline T operator()(Context *ctx, ContextualWrapper<T> x) { return x.base; }
+
+ using ret_type = T;
+ using arg_type = ContextualWrapper<T>;
+};
+
+template <typename T> struct conv_from_str
+{
+ inline T operator()(Context *ctx, std::string x) { return string_converter<T>().from_str(ctx, x); }
+
+ using ret_type = T;
+ using arg_type = std::string;
+};
+
+template <typename T> struct conv_to_str
+{
+ inline std::string operator()(Context *ctx, T x) { return string_converter<T>().to_str(ctx, x); }
+
+ using ret_type = std::string;
+ using arg_type = T;
+};
+
+template <typename T> struct deref_and_wrap
+{
+ inline ContextualWrapper<T &> operator()(Context *ctx, T *x)
+ {
+ if (x == nullptr)
+ throw bad_wrap();
+ return ContextualWrapper<T &>(ctx, *x);
+ }
+
+ using arg_type = T *;
+ using ret_type = ContextualWrapper<T &>;
+};
+
+template <typename T> struct addr_and_unwrap
+{
+ inline T *operator()(Context *ctx, ContextualWrapper<T &> x) { return &(x.base); }
+
+ using arg_type = ContextualWrapper<T &>;
+ using ret_type = T *;
+};
+
+// Function wrapper
+// Zero parameters, one return
+template <typename Class, typename FuncT, FuncT fn, typename rv_conv> struct fn_wrapper_0a
+{
+ using class_type = typename WrapIfNotContext<Class>::maybe_wrapped_t;
+ using conv_result_type = typename rv_conv::ret_type;
+
+ static py::object wrapped_fn(class_type &cls)
+ {
+ Context *ctx = get_ctx<Class>(cls);
+ Class &base = get_base<Class>(cls);
+ try {
+ return py::cast(rv_conv()(ctx, (base.*fn)()));
+ } catch (bad_wrap &) {
+ return py::none();
+ }
+ }
+
+ template <typename WrapCls> static void def_wrap(WrapCls cls_, const char *name) { cls_.def(name, wrapped_fn); }
+};
+
+// One parameter, one return
+template <typename Class, typename FuncT, FuncT fn, typename rv_conv, typename arg1_conv> struct fn_wrapper_1a
+{
+ using class_type = typename WrapIfNotContext<Class>::maybe_wrapped_t;
+ using conv_result_type = typename rv_conv::ret_type;
+ using conv_arg1_type = typename arg1_conv::arg_type;
+
+ static py::object wrapped_fn(class_type &cls, conv_arg1_type arg1)
+ {
+ Context *ctx = get_ctx<Class>(cls);
+ Class &base = get_base<Class>(cls);
+ try {
+ return py::cast(rv_conv()(ctx, (base.*fn)(arg1_conv()(ctx, arg1))));
+ } catch (bad_wrap &) {
+ return py::none();
+ }
+ }
+
+ template <typename WrapCls> static void def_wrap(WrapCls cls_, const char *name) { cls_.def(name, wrapped_fn); }
+};
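+
+// Usage sketch (editorial illustration; ctx_cls is an assumed py::class_ handle):
+// a one-argument method taking a BelId and returning an IdString could be bound as
+//
+//   fn_wrapper_1a<Context, decltype(&Context::getBelType), &Context::getBelType,
+//                 conv_to_str<IdString>, conv_from_str<BelId>>::def_wrap(ctx_cls, "getBelType");
+//
+// The BelId argument is parsed from a Python string via string_converter, and the
+// IdString result is rendered back to a Python string.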
+
+// Two parameters, one return
+template <typename Class, typename FuncT, FuncT fn, typename rv_conv, typename arg1_conv, typename arg2_conv>
+struct fn_wrapper_2a
+{
+ using class_type = typename WrapIfNotContext<Class>::maybe_wrapped_t;
+ using conv_result_type = typename rv_conv::ret_type;
+ using conv_arg1_type = typename arg1_conv::arg_type;
+ using conv_arg2_type = typename arg2_conv::arg_type;
+
+ static py::object wrapped_fn(class_type &cls, conv_arg1_type arg1, conv_arg2_type arg2)
+ {
+ Context *ctx = get_ctx<Class>(cls);
+ Class &base = get_base<Class>(cls);
+ try {
+ return py::cast(rv_conv()(ctx, (base.*fn)(arg1_conv()(ctx, arg1), arg2_conv()(ctx, arg2))));
+ } catch (bad_wrap &) {
+ return py::none();
+ }
+ }
+
+ template <typename WrapCls> static void def_wrap(WrapCls cls_, const char *name) { cls_.def(name, wrapped_fn); }
+};
+
+// Three parameters, one return
+template <typename Class, typename FuncT, FuncT fn, typename rv_conv, typename arg1_conv, typename arg2_conv,
+ typename arg3_conv>
+struct fn_wrapper_3a
+{
+ using class_type = typename WrapIfNotContext<Class>::maybe_wrapped_t;
+ using conv_result_type = typename rv_conv::ret_type;
+ using conv_arg1_type = typename arg1_conv::arg_type;
+ using conv_arg2_type = typename arg2_conv::arg_type;
+ using conv_arg3_type = typename arg3_conv::arg_type;
+
+ static py::object wrapped_fn(class_type &cls, conv_arg1_type arg1, conv_arg2_type arg2, conv_arg3_type arg3)
+ {
+ Context *ctx = get_ctx<Class>(cls);
+ Class &base = get_base<Class>(cls);
+ try {
+ return py::cast(
+ rv_conv()(ctx, (base.*fn)(arg1_conv()(ctx, arg1), arg2_conv()(ctx, arg2), arg3_conv()(ctx, arg3))));
+ } catch (bad_wrap &) {
+ return py::none();
+ }
+ }
+
+ template <typename WrapCls> static void def_wrap(WrapCls cls_, const char *name) { cls_.def(name, wrapped_fn); }
+};
+
+// Zero parameters void
+template <typename Class, typename FuncT, FuncT fn> struct fn_wrapper_0a_v
+{
+ using class_type = typename WrapIfNotContext<Class>::maybe_wrapped_t;
+
+ static void wrapped_fn(class_type &cls)
+ {
+ Class &base = get_base<Class>(cls);
+ return (base.*fn)();
+ }
+
+ template <typename WrapCls> static void def_wrap(WrapCls cls_, const char *name) { cls_.def(name, wrapped_fn); }
+};
+
+// One parameter, void
+template <typename Class, typename FuncT, FuncT fn, typename arg1_conv> struct fn_wrapper_1a_v
+{
+ using class_type = typename WrapIfNotContext<Class>::maybe_wrapped_t;
+ using conv_arg1_type = typename arg1_conv::arg_type;
+
+ static void wrapped_fn(class_type &cls, conv_arg1_type arg1)
+ {
+ Context *ctx = get_ctx<Class>(cls);
+ Class &base = get_base<Class>(cls);
+ (base.*fn)(arg1_conv()(ctx, arg1));
+ }
+
+ template <typename WrapCls> static void def_wrap(WrapCls cls_, const char *name) { cls_.def(name, wrapped_fn); }
+
+ template <typename WrapCls, typename Ta>
+ static void def_wrap(WrapCls cls_, const char *name, Ta a = py::arg("arg1"))
+ {
+ cls_.def(name, wrapped_fn, a);
+ }
+};
+
+// Two parameters, no return
+template <typename Class, typename FuncT, FuncT fn, typename arg1_conv, typename arg2_conv> struct fn_wrapper_2a_v
+{
+ using class_type = typename WrapIfNotContext<Class>::maybe_wrapped_t;
+ using conv_arg1_type = typename arg1_conv::arg_type;
+ using conv_arg2_type = typename arg2_conv::arg_type;
+
+ static void wrapped_fn(class_type &cls, conv_arg1_type arg1, conv_arg2_type arg2)
+ {
+ Context *ctx = get_ctx<Class>(cls);
+ Class &base = get_base<Class>(cls);
+ (base.*fn)(arg1_conv()(ctx, arg1), arg2_conv()(ctx, arg2));
+ }
+
+ template <typename WrapCls> static void def_wrap(WrapCls cls_, const char *name) { cls_.def(name, wrapped_fn); }
+
+ template <typename WrapCls, typename... Ta> static void def_wrap(WrapCls cls_, const char *name, Ta... a)
+ {
+ cls_.def(name, wrapped_fn, a...);
+ }
+};
+
+// Three parameters, no return
+template <typename Class, typename FuncT, FuncT fn, typename arg1_conv, typename arg2_conv, typename arg3_conv>
+struct fn_wrapper_3a_v
+{
+ using class_type = typename WrapIfNotContext<Class>::maybe_wrapped_t;
+ using conv_arg1_type = typename arg1_conv::arg_type;
+ using conv_arg2_type = typename arg2_conv::arg_type;
+ using conv_arg3_type = typename arg3_conv::arg_type;
+
+ static void wrapped_fn(class_type &cls, conv_arg1_type arg1, conv_arg2_type arg2, conv_arg3_type arg3)
+ {
+ Context *ctx = get_ctx<Class>(cls);
+ Class &base = get_base<Class>(cls);
+ (base.*fn)(arg1_conv()(ctx, arg1), arg2_conv()(ctx, arg2), arg3_conv()(ctx, arg3));
+ }
+
+ template <typename WrapCls> static void def_wrap(WrapCls cls_, const char *name) { cls_.def(name, wrapped_fn); }
+
+ template <typename WrapCls, typename... Ta> static void def_wrap(WrapCls cls_, const char *name, Ta... a)
+ {
+ cls_.def(name, wrapped_fn, a...);
+ }
+};
+
+// Four parameters, no return
+template <typename Class, typename FuncT, FuncT fn, typename arg1_conv, typename arg2_conv, typename arg3_conv,
+ typename arg4_conv>
+struct fn_wrapper_4a_v
+{
+ using class_type = typename WrapIfNotContext<Class>::maybe_wrapped_t;
+ using conv_arg1_type = typename arg1_conv::arg_type;
+ using conv_arg2_type = typename arg2_conv::arg_type;
+ using conv_arg3_type = typename arg3_conv::arg_type;
+ using conv_arg4_type = typename arg4_conv::arg_type;
+
+ static void wrapped_fn(class_type &cls, conv_arg1_type arg1, conv_arg2_type arg2, conv_arg3_type arg3,
+ conv_arg4_type arg4)
+ {
+ Context *ctx = get_ctx<Class>(cls);
+ Class &base = get_base<Class>(cls);
+ (base.*fn)(arg1_conv()(ctx, arg1), arg2_conv()(ctx, arg2), arg3_conv()(ctx, arg3), arg4_conv()(ctx, arg4));
+ }
+
+ template <typename WrapCls> static void def_wrap(WrapCls cls_, const char *name) { cls_.def(name, wrapped_fn); }
+
+ template <typename WrapCls, typename... Ta> static void def_wrap(WrapCls cls_, const char *name, Ta... a)
+ {
+ cls_.def(name, wrapped_fn, a...);
+ }
+};
+
+// Five parameters, no return
+template <typename Class, typename FuncT, FuncT fn, typename arg1_conv, typename arg2_conv, typename arg3_conv,
+ typename arg4_conv, typename arg5_conv>
+struct fn_wrapper_5a_v
+{
+ using class_type = typename WrapIfNotContext<Class>::maybe_wrapped_t;
+ using conv_arg1_type = typename arg1_conv::arg_type;
+ using conv_arg2_type = typename arg2_conv::arg_type;
+ using conv_arg3_type = typename arg3_conv::arg_type;
+ using conv_arg4_type = typename arg4_conv::arg_type;
+ using conv_arg5_type = typename arg5_conv::arg_type;
+
+ static void wrapped_fn(class_type &cls, conv_arg1_type arg1, conv_arg2_type arg2, conv_arg3_type arg3,
+ conv_arg4_type arg4, conv_arg5_type arg5)
+ {
+ Context *ctx = get_ctx<Class>(cls);
+ Class &base = get_base<Class>(cls);
+ (base.*fn)(arg1_conv()(ctx, arg1), arg2_conv()(ctx, arg2), arg3_conv()(ctx, arg3), arg4_conv()(ctx, arg4),
+ arg5_conv()(ctx, arg5));
+ }
+
+ template <typename WrapCls> static void def_wrap(WrapCls cls_, const char *name) { cls_.def(name, wrapped_fn); }
+
+ template <typename WrapCls, typename... Ta> static void def_wrap(WrapCls cls_, const char *name, Ta... a)
+ {
+ cls_.def(name, wrapped_fn, a...);
+ }
+};
+
+// Six parameters, no return
+template <typename Class, typename FuncT, FuncT fn, typename arg1_conv, typename arg2_conv, typename arg3_conv,
+ typename arg4_conv, typename arg5_conv, typename arg6_conv>
+struct fn_wrapper_6a_v
+{
+ using class_type = typename WrapIfNotContext<Class>::maybe_wrapped_t;
+ using conv_arg1_type = typename arg1_conv::arg_type;
+ using conv_arg2_type = typename arg2_conv::arg_type;
+ using conv_arg3_type = typename arg3_conv::arg_type;
+ using conv_arg4_type = typename arg4_conv::arg_type;
+ using conv_arg5_type = typename arg5_conv::arg_type;
+ using conv_arg6_type = typename arg6_conv::arg_type;
+
+ static void wrapped_fn(class_type &cls, conv_arg1_type arg1, conv_arg2_type arg2, conv_arg3_type arg3,
+ conv_arg4_type arg4, conv_arg5_type arg5, conv_arg6_type arg6)
+ {
+ Context *ctx = get_ctx<Class>(cls);
+ Class &base = get_base<Class>(cls);
+ (base.*fn)(arg1_conv()(ctx, arg1), arg2_conv()(ctx, arg2), arg3_conv()(ctx, arg3), arg4_conv()(ctx, arg4),
+ arg5_conv()(ctx, arg5), arg6_conv()(ctx, arg6));
+ }
+
+ template <typename WrapCls> static void def_wrap(WrapCls cls_, const char *name) { cls_.def(name, wrapped_fn); }
+
+ template <typename WrapCls, typename... Ta> static void def_wrap(WrapCls cls_, const char *name, Ta... a)
+ {
+ cls_.def(name, wrapped_fn, a...);
+ }
+};
+
+// Wrapped getter
+template <typename Class, typename MemT, MemT mem, typename v_conv> struct readonly_wrapper
+{
+ using class_type = typename WrapIfNotContext<Class>::maybe_wrapped_t;
+ using conv_val_type = typename v_conv::ret_type;
+
+ static py::object wrapped_getter(class_type &cls)
+ {
+ Context *ctx = get_ctx<Class>(cls);
+ Class &base = get_base<Class>(cls);
+ try {
+ return py::cast(v_conv()(ctx, (base.*mem)));
+ } catch (bad_wrap &) {
+ return py::none();
+ }
+ }
+
+ template <typename WrapCls> static void def_wrap(WrapCls cls_, const char *name)
+ {
+ cls_.def_property_readonly(name, wrapped_getter);
+ }
+};
+
+// Wrapped getter/setter
+template <typename Class, typename MemT, MemT mem, typename get_conv, typename set_conv> struct readwrite_wrapper
+{
+ using class_type = typename WrapIfNotContext<Class>::maybe_wrapped_t;
+ using conv_val_type = typename get_conv::ret_type;
+
+ static py::object wrapped_getter(class_type &cls)
+ {
+ Context *ctx = get_ctx<Class>(cls);
+ Class &base = get_base<Class>(cls);
+ try {
+ return py::cast(get_conv()(ctx, (base.*mem)));
+ } catch (bad_wrap &) {
+ return py::none();
+ }
+ }
+
+ using conv_arg_type = typename set_conv::arg_type;
+
+ static void wrapped_setter(class_type &cls, conv_arg_type val)
+ {
+ Context *ctx = get_ctx<Class>(cls);
+ Class &base = get_base<Class>(cls);
+ (base.*mem) = set_conv()(ctx, val);
+ }
+
+ template <typename WrapCls> static void def_wrap(WrapCls cls_, const char *name)
+ {
+ cls_.def_property(name, wrapped_getter, wrapped_setter);
+ }
+};
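+
+// Usage sketch (editorial illustration; cell_cls is an assumed py::class_ handle,
+// and CellInfo is assumed to expose IdString members "name" and "type"):
+//
+//   readonly_wrapper<CellInfo &, decltype(&CellInfo::name), &CellInfo::name,
+//                    conv_to_str<IdString>>::def_wrap(cell_cls, "name");
+//   readwrite_wrapper<CellInfo &, decltype(&CellInfo::type), &CellInfo::type,
+//                     conv_to_str<IdString>, conv_from_str<IdString>>::def_wrap(cell_cls, "type");
+//
+// The getter converts the member to a Python-friendly value; the setter runs the
+// reverse conversion before assigning back to the member.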
+
+} // namespace PythonConversion
+
+NEXTPNR_NAMESPACE_END
+
+#endif
diff --git a/common/kernel/relptr.h b/common/kernel/relptr.h
new file mode 100644
index 00000000..f0f45b7d
--- /dev/null
+++ b/common/kernel/relptr.h
@@ -0,0 +1,74 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef RELPTR_H
+#define RELPTR_H
+
+#include <cstdint>
+
+#include "nextpnr_assertions.h"
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+template <typename T> struct RelPtr
+{
+ int32_t offset;
+
+ const T *get() const { return reinterpret_cast<const T *>(reinterpret_cast<const char *>(this) + offset); }
+
+ const T &operator[](std::size_t index) const { return get()[index]; }
+
+ const T &operator*() const { return *(get()); }
+
+ const T *operator->() const { return get(); }
+
+ RelPtr(const RelPtr &) = delete;
+ RelPtr &operator=(const RelPtr &) = delete;
+};
+
+NPNR_PACKED_STRUCT(template <typename T> struct RelSlice {
+ int32_t offset;
+ uint32_t length;
+
+ const T *get() const { return reinterpret_cast<const T *>(reinterpret_cast<const char *>(this) + offset); }
+
+ const T &operator[](std::size_t index) const
+ {
+ NPNR_ASSERT(index < length);
+ return get()[index];
+ }
+
+ const T *begin() const { return get(); }
+ const T *end() const { return get() + length; }
+
+ size_t size() const { return length; }
+ ptrdiff_t ssize() const { return length; }
+
+ const T &operator*() const { return *(get()); }
+
+ const T *operator->() const { return get(); }
+
+ RelSlice(const RelSlice &) = delete;
+ RelSlice &operator=(const RelSlice &) = delete;
+});
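+
+// Usage sketch (editorial illustration; ExamplePOD and the blob layout are
+// assumptions, not any real chip database format). RelPtr/RelSlice are meant to
+// be overlaid on a memory-mapped binary blob, with each offset taken relative to
+// the address of the field itself:
+//
+//   NPNR_PACKED_STRUCT(struct ExamplePOD {
+//       RelSlice<int32_t> wire_ids; // offset/length of an array elsewhere in the blob
+//       RelPtr<char> name;          // offset of a NUL-terminated string in the blob
+//   });
+//
+//   const ExamplePOD *pod = reinterpret_cast<const ExamplePOD *>(blob + pod_offset);
+//   for (int32_t w : pod->wire_ids) { /* ... */ }
+//   const char *n = pod->name.get();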
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* RELPTR_H */
diff --git a/common/kernel/report.cc b/common/kernel/report.cc
new file mode 100644
index 00000000..98ff14fb
--- /dev/null
+++ b/common/kernel/report.cc
@@ -0,0 +1,259 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2021 gatecat <gatecat@ds0.me>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "json11.hpp"
+#include "nextpnr.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+using namespace json11;
+
+namespace {
+dict<IdString, std::pair<int, int>> get_utilization(const Context *ctx)
+{
+ // Sort by Bel type
+ dict<IdString, std::pair<int, int>> result;
+ for (auto &cell : ctx->cells) {
+ result[ctx->getBelBucketName(ctx->getBelBucketForCellType(cell.second.get()->type))].first++;
+ }
+ for (auto bel : ctx->getBels()) {
+ if (!ctx->getBelHidden(bel)) {
+ result[ctx->getBelBucketName(ctx->getBelBucketForBel(bel))].second++;
+ }
+ }
+ return result;
+}
+} // namespace
+
+static std::string clock_event_name(const Context *ctx, const ClockEvent &e)
+{
+ std::string value;
+ if (e.clock == ctx->id("$async$"))
+ value = std::string("<async>");
+ else
+ value = (e.edge == FALLING_EDGE ? std::string("negedge ") : std::string("posedge ")) + e.clock.str(ctx);
+ return value;
+};
+
+static Json::array report_critical_paths(const Context *ctx)
+{
+
+ auto report_critical_path = [ctx](const CriticalPath &report) {
+ Json::array pathJson;
+
+ for (const auto &segment : report.segments) {
+
+ const auto &driver = ctx->cells.at(segment.from.first);
+ const auto &sink = ctx->cells.at(segment.to.first);
+
+ auto fromLoc = ctx->getBelLocation(driver->bel);
+ auto toLoc = ctx->getBelLocation(sink->bel);
+
+ auto fromJson = Json::object({{"cell", segment.from.first.c_str(ctx)},
+ {"port", segment.from.second.c_str(ctx)},
+ {"loc", Json::array({fromLoc.x, fromLoc.y})}});
+
+ auto toJson = Json::object({{"cell", segment.to.first.c_str(ctx)},
+ {"port", segment.to.second.c_str(ctx)},
+ {"loc", Json::array({toLoc.x, toLoc.y})}});
+
+ auto segmentJson = Json::object({
+ {"delay", ctx->getDelayNS(segment.delay)},
+ {"from", fromJson},
+ {"to", toJson},
+ });
+
+ if (segment.type == CriticalPath::Segment::Type::CLK_TO_Q) {
+ segmentJson["type"] = "clk-to-q";
+ } else if (segment.type == CriticalPath::Segment::Type::SOURCE) {
+ segmentJson["type"] = "source";
+ } else if (segment.type == CriticalPath::Segment::Type::LOGIC) {
+ segmentJson["type"] = "logic";
+ } else if (segment.type == CriticalPath::Segment::Type::SETUP) {
+ segmentJson["type"] = "setup";
+ } else if (segment.type == CriticalPath::Segment::Type::ROUTING) {
+ segmentJson["type"] = "routing";
+ segmentJson["net"] = segment.net.c_str(ctx);
+ segmentJson["budget"] = ctx->getDelayNS(segment.budget);
+ }
+
+ pathJson.push_back(segmentJson);
+ }
+
+ return pathJson;
+ };
+
+ auto critPathsJson = Json::array();
+
+ // Critical paths
+ for (auto &report : ctx->timing_result.clock_paths) {
+
+ critPathsJson.push_back(Json::object({{"from", clock_event_name(ctx, report.second.clock_pair.start)},
+ {"to", clock_event_name(ctx, report.second.clock_pair.end)},
+ {"path", report_critical_path(report.second)}}));
+ }
+
+ // Cross-domain paths
+ for (auto &report : ctx->timing_result.xclock_paths) {
+ critPathsJson.push_back(Json::object({{"from", clock_event_name(ctx, report.clock_pair.start)},
+ {"to", clock_event_name(ctx, report.clock_pair.end)},
+ {"path", report_critical_path(report)}}));
+ }
+
+ return critPathsJson;
+}
+
+static Json::array report_detailed_net_timings(const Context *ctx)
+{
+ auto detailedNetTimingsJson = Json::array();
+
+ // Detailed per-net timing analysis
+ for (const auto &it : ctx->timing_result.detailed_net_timings) {
+
+ const NetInfo *net = ctx->nets.at(it.first).get();
+ ClockEvent start = it.second[0].clock_pair.start;
+
+ Json::array endpointsJson;
+ for (const auto &sink_timing : it.second) {
+
+ // FIXME: Is it possible that there are multiple different start
+ // events for a single net? It has a single driver
+ NPNR_ASSERT(sink_timing.clock_pair.start == start);
+
+ auto endpointJson = Json::object({{"cell", sink_timing.cell_port.first.c_str(ctx)},
+ {"port", sink_timing.cell_port.second.c_str(ctx)},
+ {"event", clock_event_name(ctx, sink_timing.clock_pair.end)},
+ {"delay", ctx->getDelayNS(sink_timing.delay)},
+ {"budget", ctx->getDelayNS(sink_timing.budget)}});
+ endpointsJson.push_back(endpointJson);
+ }
+
+ auto netTimingJson = Json::object({{"net", net->name.c_str(ctx)},
+ {"driver", net->driver.cell->name.c_str(ctx)},
+ {"port", net->driver.port.c_str(ctx)},
+ {"event", clock_event_name(ctx, start)},
+ {"endpoints", endpointsJson}});
+
+ detailedNetTimingsJson.push_back(netTimingJson);
+ }
+
+ return detailedNetTimingsJson;
+}
+
+/*
+Report JSON structure:
+
+{
+ "utilization": {
+ <BEL name>: {
+ "available": <available count>,
+ "used": <used count>
+ },
+ ...
+ },
+ "fmax" {
+ <clock name>: {
+ "achieved": <achieved fmax [MHz]>,
+ "constraint": <target fmax [MHz]>
+ },
+ ...
+ },
+ "critical_paths": [
+ {
+ "from": <clock event edge and name>,
+ "to": <clock event edge and name>,
+ "path": [
+ {
+ "from": {
+ "cell": <driver cell name>
+ "port": <driver port name>
+ "loc": [
+ <grid x>,
+ <grid y>
+ ]
+ },
+ "to": {
+ "cell": <sink cell name>
+ "port": <sink port name>
+ "loc": [
+ <grid x>,
+ <grid y>
+ ]
+ },
+ "type": <path segment type "clk-to-q", "source", "logic", "routing" or "setup">,
+ "net": <net name (for routing only!)>,
+ "delay": <segment delay [ns]>,
+ "budget": <segment delay budget [ns] (for routing only!)>,
+ }
+ ...
+ ]
+ },
+ ...
+ ],
+ "detailed_net_timings": [
+ {
+ "driver": <driving cell name>,
+ "port": <driving cell port name>,
+ "event": <driver clock event name>,
+ "net": <net name>,
+ "endpoints": [
+ {
+ "cell": <sink cell name>,
+ "port": <sink cell port name>,
+ "event": <destination clock event name>,
+ "delay": <delay [ns]>,
+ "budget": <delay budget [ns]>,
+ }
+ ...
+ ]
+ }
+ ...
+ ]
+}
+*/
+
+void Context::writeReport(std::ostream &out) const
+{
+ auto util = get_utilization(this);
+ dict<std::string, Json> util_json;
+ for (const auto &kv : util) {
+ util_json[kv.first.str(this)] = Json::object{
+ {"used", kv.second.first},
+ {"available", kv.second.second},
+ };
+ }
+ dict<std::string, Json> fmax_json;
+ for (const auto &kv : timing_result.clock_fmax) {
+ fmax_json[kv.first.str(this)] = Json::object{
+ {"achieved", kv.second.achieved},
+ {"constraint", kv.second.constraint},
+ };
+ }
+
+ Json::object jsonRoot{
+ {"utilization", util_json}, {"fmax", fmax_json}, {"critical_paths", report_critical_paths(this)}};
+
+ if (detailed_timing_report) {
+ jsonRoot["detailed_net_timings"] = report_detailed_net_timings(this);
+ }
+
+ out << Json(jsonRoot).dump() << std::endl;
+}
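+
+// Usage sketch (editorial illustration): the report is typically written to a
+// stream chosen by the frontend, e.g.
+//
+//   std::ofstream f("report.json");
+//   ctx->writeReport(f);
+//
+// The "detailed_net_timings" section is only emitted when the context's
+// detailed_timing_report flag is set.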
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/scope_lock.h b/common/kernel/scope_lock.h
new file mode 100644
index 00000000..2f0f767c
--- /dev/null
+++ b/common/kernel/scope_lock.h
@@ -0,0 +1,67 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2021 Symbiflow Authors.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef SCOPE_LOCK_H
+#define SCOPE_LOCK_H
+
+#include <stdexcept>
+
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+// Provides a simple RAII locking object. ScopeLock takes a lock when
+// constructed, and releases the lock on destruction or if "unlock_early" is
+// called.
+//
+// LockingObject must provide the methods "void lock(void)" and "void unlock(void)".
+template <typename LockingObject> class ScopeLock
+{
+ public:
+ ScopeLock(LockingObject *obj) : obj_(obj), locked_(false)
+ {
+ obj_->lock();
+ locked_ = true;
+ }
+ ScopeLock(const ScopeLock &other) = delete;
+ ScopeLock(const ScopeLock &&other) = delete;
+
+ ~ScopeLock()
+ {
+ if (locked_) {
+ obj_->unlock();
+ }
+ }
+ void unlock_early()
+ {
+ if (!locked_) {
+ throw std::runtime_error("Lock already released?");
+ }
+ locked_ = false;
+ obj_->unlock();
+ }
+
+ private:
+ LockingObject *obj_;
+ bool locked_;
+};
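+
+// Usage sketch (editorial illustration, assuming a LockingObject such as Context
+// that provides lock()/unlock()):
+//
+//   ScopeLock<Context> lock(ctx);
+//   // ... mutate the design while holding the lock ...
+//   lock.unlock_early(); // optional: release before the end of the scope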
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* SCOPE_LOCK_H */
diff --git a/common/kernel/sdf.cc b/common/kernel/sdf.cc
new file mode 100644
index 00000000..acff56ed
--- /dev/null
+++ b/common/kernel/sdf.cc
@@ -0,0 +1,334 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2019 gatecat <gatecat@ds0.me>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "nextpnr.h"
+#include "util.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+namespace SDF {
+
+struct MinMaxTyp
+{
+ double min, typ, max;
+};
+
+struct RiseFallDelay
+{
+ MinMaxTyp rise, fall;
+};
+
+struct PortAndEdge
+{
+ std::string port;
+ ClockEdge edge;
+};
+
+struct IOPath
+{
+ std::string from, to;
+ RiseFallDelay delay;
+};
+
+struct TimingCheck
+{
+ enum CheckType
+ {
+ SETUPHOLD,
+ PERIOD,
+ WIDTH
+ } type;
+ PortAndEdge from, to;
+ RiseFallDelay delay;
+};
+
+struct Cell
+{
+ std::string celltype, instance;
+ std::vector<IOPath> iopaths;
+ std::vector<TimingCheck> checks;
+};
+
+struct CellPort
+{
+ std::string cell, port;
+};
+
+struct Interconnect
+{
+ CellPort from, to;
+ RiseFallDelay delay;
+};
+
+struct SDFWriter
+{
+ bool cvc_mode = false;
+ std::vector<Cell> cells;
+ std::vector<Interconnect> conn;
+ std::string sdfversion, design, vendor, program;
+
+ std::string format_name(const std::string &name)
+ {
+ std::string fmt = "\"";
+ for (char c : name) {
+ if (c == '\\' || c == '\"')
+ fmt += "\"";
+ fmt += c;
+ }
+ fmt += "\"";
+ return fmt;
+ }
+
+ std::string escape_name(const std::string &name)
+ {
+ std::string esc;
+ for (char c : name) {
+ if (c == '$' || c == '\\' || c == '[' || c == ']' || c == ':' || (cvc_mode && c == '.'))
+ esc += '\\';
+ esc += c;
+ }
+ return esc;
+ }
+
+ std::string timing_check_name(TimingCheck::CheckType type)
+ {
+ switch (type) {
+ case TimingCheck::SETUPHOLD:
+ return "SETUPHOLD";
+ case TimingCheck::PERIOD:
+ return "PERIOD";
+ case TimingCheck::WIDTH:
+ return "WIDTH";
+ default:
+ NPNR_ASSERT_FALSE("unknown timing check type");
+ }
+ }
+
+ void write_delay(std::ostream &out, const RiseFallDelay &delay)
+ {
+ write_delay(out, delay.rise);
+ out << " ";
+ write_delay(out, delay.fall);
+ }
+
+ void write_delay(std::ostream &out, const MinMaxTyp &delay)
+ {
+ if (cvc_mode)
+ out << "(" << int(delay.min) << ":" << int(delay.typ) << ":" << int(delay.max) << ")";
+ else
+ out << "(" << delay.min << ":" << delay.typ << ":" << delay.max << ")";
+ }
+
+ void write_port(std::ostream &out, const CellPort &port)
+ {
+ if (cvc_mode)
+ out << escape_name(port.cell) + "." + escape_name(port.port);
+ else
+ out << escape_name(port.cell + "/" + port.port);
+ }
+
+ void write_portedge(std::ostream &out, const PortAndEdge &pe)
+ {
+ out << "(" << (pe.edge == RISING_EDGE ? "posedge" : "negedge") << " " << escape_name(pe.port) << ")";
+ }
+
+ void write(std::ostream &out)
+ {
+ out << "(DELAYFILE" << std::endl;
+ // Headers and metadata
+ out << " (SDFVERSION " << format_name(sdfversion) << ")" << std::endl;
+ out << " (DESIGN " << format_name(design) << ")" << std::endl;
+ out << " (VENDOR " << format_name(vendor) << ")" << std::endl;
+ out << " (PROGRAM " << format_name(program) << ")" << std::endl;
+ out << " (DIVIDER " << (cvc_mode ? "." : "/") << ")" << std::endl;
+ out << " (TIMESCALE 1ps)" << std::endl;
+        // Write interconnect delays, with the main design being a "cell"
+ out << " (CELL" << std::endl;
+ out << " (CELLTYPE " << format_name(design) << ")" << std::endl;
+ out << " (INSTANCE )" << std::endl;
+ out << " (DELAY" << std::endl;
+ out << " (ABSOLUTE" << std::endl;
+ for (auto &ic : conn) {
+ out << " (INTERCONNECT ";
+ write_port(out, ic.from);
+ out << " ";
+ write_port(out, ic.to);
+ out << " ";
+ write_delay(out, ic.delay);
+ out << ")" << std::endl;
+ }
+ out << " )" << std::endl;
+ out << " )" << std::endl;
+ out << " )" << std::endl;
+ // Write cells
+ for (auto &cell : cells) {
+ out << " (CELL" << std::endl;
+ out << " (CELLTYPE " << format_name(cell.celltype) << ")" << std::endl;
+ out << " (INSTANCE " << escape_name(cell.instance) << ")" << std::endl;
+ // IOPATHs (combinational delay and clock-to-q)
+ if (!cell.iopaths.empty()) {
+ out << " (DELAY" << std::endl;
+ out << " (ABSOLUTE" << std::endl;
+ for (auto &path : cell.iopaths) {
+ out << " (IOPATH " << escape_name(path.from) << " " << escape_name(path.to) << " ";
+ write_delay(out, path.delay);
+ out << ")" << std::endl;
+ }
+ out << " )" << std::endl;
+ out << " )" << std::endl;
+ }
+ // Timing Checks (setup/hold, period, width)
+ if (!cell.checks.empty()) {
+ out << " (TIMINGCHECK" << std::endl;
+ for (auto &check : cell.checks) {
+ out << " (" << timing_check_name(check.type) << " ";
+ write_portedge(out, check.from);
+ out << " ";
+ if (check.type == TimingCheck::SETUPHOLD) {
+ write_portedge(out, check.to);
+ out << " ";
+ }
+ if (check.type == TimingCheck::SETUPHOLD)
+ write_delay(out, check.delay);
+ else
+ write_delay(out, check.delay.rise);
+ out << ")" << std::endl;
+ }
+ out << " )" << std::endl;
+ }
+ out << " )" << std::endl;
+ }
+ out << ")" << std::endl;
+ }
+};
+
+} // namespace SDF
+
+void Context::writeSDF(std::ostream &out, bool cvc_mode) const
+{
+ using namespace SDF;
+ SDFWriter wr;
+ wr.cvc_mode = cvc_mode;
+ wr.design = str_or_default(attrs, id("module"), "top");
+ wr.sdfversion = "3.0";
+ wr.vendor = "nextpnr";
+ wr.program = "nextpnr";
+
+ const double delay_scale = 1000;
+ // Convert from DelayQuad to SDF-friendly RiseFallDelay
+ auto convert_delay = [&](const DelayQuad &dly) {
+ RiseFallDelay rf;
+ rf.rise.min = getDelayNS(dly.minRiseDelay()) * delay_scale;
+ rf.rise.typ = getDelayNS((dly.minRiseDelay() + dly.maxRiseDelay()) / 2) * delay_scale; // fixme: typ delays?
+ rf.rise.max = getDelayNS(dly.maxRiseDelay()) * delay_scale;
+ rf.fall.min = getDelayNS(dly.minFallDelay()) * delay_scale;
+ rf.fall.typ = getDelayNS((dly.minFallDelay() + dly.maxFallDelay()) / 2) * delay_scale; // fixme: typ delays?
+ rf.fall.max = getDelayNS(dly.maxFallDelay()) * delay_scale;
+ return rf;
+ };
+
+ auto convert_setuphold = [&](const DelayPair &setup, const DelayPair &hold) {
+ RiseFallDelay rf;
+ rf.rise.min = getDelayNS(setup.minDelay()) * delay_scale;
+ rf.rise.typ = getDelayNS((setup.minDelay() + setup.maxDelay()) / 2) * delay_scale; // fixme: typ delays?
+ rf.rise.max = getDelayNS(setup.maxDelay()) * delay_scale;
+ rf.fall.min = getDelayNS(hold.minDelay()) * delay_scale;
+ rf.fall.typ = getDelayNS((hold.minDelay() + hold.maxDelay()) / 2) * delay_scale; // fixme: typ delays?
+ rf.fall.max = getDelayNS(hold.maxDelay()) * delay_scale;
+ return rf;
+ };
+
+ for (const auto &cell : cells) {
+ Cell sc;
+ const CellInfo *ci = cell.second.get();
+ sc.instance = ci->name.str(this);
+ sc.celltype = ci->type.str(this);
+ for (auto port : ci->ports) {
+ int clockCount = 0;
+ TimingPortClass cls = getPortTimingClass(ci, port.first, clockCount);
+ if (cls == TMG_IGNORE)
+ continue;
+ if (port.second.net == nullptr)
+ continue; // Ignore disconnected ports
+ if (port.second.type != PORT_IN) {
+ // Add combinational paths to this output (or inout)
+ for (auto other : ci->ports) {
+ if (other.second.net == nullptr)
+ continue;
+ if (other.second.type == PORT_OUT)
+ continue;
+ DelayQuad dly;
+ if (!getCellDelay(ci, other.first, port.first, dly))
+ continue;
+ IOPath iop;
+ iop.from = other.first.str(this);
+ iop.to = port.first.str(this);
+ iop.delay = convert_delay(dly);
+ sc.iopaths.push_back(iop);
+ }
+ // Add clock-to-output delays, also as IOPaths
+ if (cls == TMG_REGISTER_OUTPUT)
+ for (int i = 0; i < clockCount; i++) {
+ auto clkInfo = getPortClockingInfo(ci, port.first, i);
+ IOPath cqp;
+ cqp.from = clkInfo.clock_port.str(this);
+ cqp.to = port.first.str(this);
+ cqp.delay = convert_delay(clkInfo.clockToQ);
+ sc.iopaths.push_back(cqp);
+ }
+ }
+ if (port.second.type != PORT_OUT && cls == TMG_REGISTER_INPUT) {
+ // Add setup/hold checks
+ for (int i = 0; i < clockCount; i++) {
+ auto clkInfo = getPortClockingInfo(ci, port.first, i);
+ TimingCheck chk;
+ chk.from.edge = RISING_EDGE; // Add setup/hold checks equally for rising and falling edges
+ chk.from.port = port.first.str(this);
+ chk.to.edge = clkInfo.edge;
+ chk.to.port = clkInfo.clock_port.str(this);
+ chk.type = TimingCheck::SETUPHOLD;
+ chk.delay = convert_setuphold(clkInfo.setup, clkInfo.hold);
+ sc.checks.push_back(chk);
+ chk.from.edge = FALLING_EDGE;
+ sc.checks.push_back(chk);
+ }
+ }
+ }
+ wr.cells.push_back(sc);
+ }
+
+ for (auto &net : nets) {
+ NetInfo *ni = net.second.get();
+ if (ni->driver.cell == nullptr)
+ continue;
+ for (auto &usr : ni->users) {
+ Interconnect ic;
+ ic.from.cell = ni->driver.cell->name.str(this);
+ ic.from.port = ni->driver.port.str(this);
+ ic.to.cell = usr.cell->name.str(this);
+ ic.to.port = usr.port.str(this);
+ // FIXME: min/max routing delay
+ ic.delay = convert_delay(DelayQuad(getNetinfoRouteDelay(ni, usr)));
+ wr.conn.push_back(ic);
+ }
+ }
+ wr.write(out);
+}
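+
+// Usage sketch (editorial illustration): writeSDF is normally driven by the
+// frontend, roughly as
+//
+//   std::ofstream f("design.sdf");
+//   ctx->writeSDF(f, /*cvc_mode=*/false);
+//
+// cvc_mode switches to the '.' hierarchy divider and the integer delay triples
+// expected by the CVC simulator.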
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/sso_array.h b/common/kernel/sso_array.h
new file mode 100644
index 00000000..80e7d1c1
--- /dev/null
+++ b/common/kernel/sso_array.h
@@ -0,0 +1,132 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef SSO_ARRAY_H
+#define SSO_ARRAY_H
+
+#include <cstdint>
+
+#include "nextpnr_assertions.h"
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+// A small-size-optimised array that is statically allocated when the size is N or less, and heap-allocated otherwise
+template <typename T, std::size_t N> class SSOArray
+{
+ private:
+ union
+ {
+ T data_static[N];
+ T *data_heap;
+ };
+ std::size_t m_size;
+ inline bool is_heap() const { return (m_size > N); }
+ void alloc()
+ {
+ if (is_heap()) {
+ data_heap = new T[m_size];
+ }
+ }
+
+ public:
+ T *data() { return is_heap() ? data_heap : data_static; }
+ const T *data() const { return is_heap() ? data_heap : data_static; }
+ std::size_t size() const { return m_size; }
+
+ T *begin() { return data(); }
+ T *end() { return data() + m_size; }
+ const T *begin() const { return data(); }
+ const T *end() const { return data() + m_size; }
+
+ SSOArray() : m_size(0){};
+
+ SSOArray(std::size_t size, const T &init = T()) : m_size(size)
+ {
+ alloc();
+ std::fill(begin(), end(), init);
+ }
+
+ SSOArray(const SSOArray &other) : m_size(other.size())
+ {
+ alloc();
+ std::copy(other.begin(), other.end(), begin());
+ }
+
+ SSOArray(SSOArray &&other) : m_size(other.size())
+ {
+ if (is_heap())
+ data_heap = other.data_heap;
+ else
+ std::copy(other.begin(), other.end(), begin());
+ other.m_size = 0;
+ }
+ SSOArray &operator=(const SSOArray &other)
+ {
+ if (&other == this)
+ return *this;
+ if (is_heap())
+ delete[] data_heap;
+ m_size = other.m_size;
+ alloc();
+ std::copy(other.begin(), other.end(), begin());
+ return *this;
+ }
+
+ template <typename Tother> SSOArray(const Tother &other) : m_size(other.size())
+ {
+ alloc();
+ std::copy(other.begin(), other.end(), begin());
+ }
+
+ ~SSOArray()
+ {
+ if (is_heap()) {
+ delete[] data_heap;
+ }
+ }
+
+ bool operator==(const SSOArray &other) const
+ {
+ if (size() != other.size())
+ return false;
+ return std::equal(begin(), end(), other.begin());
+ }
+ bool operator!=(const SSOArray &other) const
+ {
+ if (size() != other.size())
+ return true;
+ return !std::equal(begin(), end(), other.begin());
+ }
+ T &operator[](std::size_t idx)
+ {
+ NPNR_ASSERT(idx < m_size);
+ return data()[idx];
+ }
+ const T &operator[](std::size_t idx) const
+ {
+ NPNR_ASSERT(idx < m_size);
+ return data()[idx];
+ }
+};
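+
+// Usage sketch (editorial illustration): SSOArray behaves like a fixed-size array
+// whose storage is inline for small sizes and heap-allocated otherwise.
+//
+//   SSOArray<int, 4> small(3, 0); // 3 elements, stored inline (3 <= 4)
+//   SSOArray<int, 4> big(10, -1); // 10 elements, stored on the heap
+//   small[0] = 42;
+//   for (int v : big) { /* ... */ }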
+
+NEXTPNR_NAMESPACE_END
+
+#endif
diff --git a/common/kernel/str_ring_buffer.cc b/common/kernel/str_ring_buffer.cc
new file mode 100644
index 00000000..443d8612
--- /dev/null
+++ b/common/kernel/str_ring_buffer.cc
@@ -0,0 +1,34 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "str_ring_buffer.h"
+
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+std::string &StrRingBuffer::next()
+{
+ std::string &s = buffer.at(index++);
+ if (index >= N)
+ index = 0;
+ return s;
+}
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/str_ring_buffer.h b/common/kernel/str_ring_buffer.h
new file mode 100644
index 00000000..42583beb
--- /dev/null
+++ b/common/kernel/str_ring_buffer.h
@@ -0,0 +1,45 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 Claire Xenia Wolf <claire@yosyshq.com>
+ * Copyright (C) 2018 Serge Bazanski <q3k@q3k.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+#ifndef STR_RING_BUFFER_H
+#define STR_RING_BUFFER_H
+
+#include <array>
+#include <string>
+
+#include "nextpnr_namespaces.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+// A ring buffer of strings, so we can return a simple const char * pointer for %s formatting - inspired by how logging
+// in Yosys works. Let's just hope no one tries to log more than 100 things in one call...
+class StrRingBuffer
+{
+ private:
+ static const size_t N = 100;
+ std::array<std::string, N> buffer;
+ size_t index = 0;
+
+ public:
+ std::string &next();
+};
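+
+// Usage sketch (editorial illustration): callers format into the next slot and
+// pass the resulting c_str() to printf-style logging without worrying about
+// lifetime, provided fewer than N strings are in flight in one call:
+//
+//   std::string &slot = buf.next();
+//   slot = some_idstring.str(ctx);
+//   log_info("cell %s\n", slot.c_str());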
+
+NEXTPNR_NAMESPACE_END
+
+#endif /* STR_RING_BUFFER_H */
diff --git a/common/kernel/svg.cc b/common/kernel/svg.cc
new file mode 100644
index 00000000..c5e2ea36
--- /dev/null
+++ b/common/kernel/svg.cc
@@ -0,0 +1,152 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2020 gatecat <gatecat@ds0.me>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <boost/algorithm/string.hpp>
+#include <fstream>
+#include "log.h"
+#include "nextpnr.h"
+#include "util.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+namespace {
+struct SVGWriter
+{
+ const Context *ctx;
+ std::ostream &out;
+ float scale = 500.0;
+ bool hide_inactive = false;
+ SVGWriter(const Context *ctx, std::ostream &out) : ctx(ctx), out(out){};
+ const char *get_stroke_colour(GraphicElement::style_t style)
+ {
+ switch (style) {
+ case GraphicElement::STYLE_GRID:
+ return "#CCC";
+ case GraphicElement::STYLE_FRAME:
+ return "#808080";
+ case GraphicElement::STYLE_INACTIVE:
+ return "#C0C0C0";
+ case GraphicElement::STYLE_ACTIVE:
+ return "#FF3030";
+ default:
+ return "#000";
+ }
+ }
+
+ void write_decal(const DecalXY &dxy)
+ {
+ for (const auto &el : ctx->getDecalGraphics(dxy.decal)) {
+ if (el.style == GraphicElement::STYLE_HIDDEN ||
+ (hide_inactive && el.style == GraphicElement::STYLE_INACTIVE))
+ continue;
+ switch (el.type) {
+ case GraphicElement::TYPE_LINE:
+ case GraphicElement::TYPE_ARROW:
+ case GraphicElement::TYPE_LOCAL_LINE:
+ case GraphicElement::TYPE_LOCAL_ARROW:
+ out << stringf("<line x1=\"%f\" y1=\"%f\" x2=\"%f\" y2=\"%f\" stroke=\"%s\"/>", (el.x1 + dxy.x) * scale,
+ (el.y1 + dxy.y) * scale, (el.x2 + dxy.x) * scale, (el.y2 + dxy.y) * scale,
+ get_stroke_colour(el.style))
+ << std::endl;
+ break;
+ case GraphicElement::TYPE_BOX:
+ out << stringf("<rect x=\"%f\" y=\"%f\" width=\"%f\" height=\"%f\" stroke=\"%s\" fill=\"%s\"/>",
+ (el.x1 + dxy.x) * scale, (el.y1 + dxy.y) * scale, (el.x2 - el.x1) * scale,
+ (el.y2 - el.y1) * scale, get_stroke_colour(el.style),
+ el.style == GraphicElement::STYLE_ACTIVE ? "#FF8080" : "none")
+ << std::endl;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ void operator()(const std::string &flags)
+ {
+ std::vector<std::string> options;
+ boost::algorithm::split(options, flags, boost::algorithm::is_space());
+ bool noroute = false;
+ for (const auto &opt : options) {
+ if (boost::algorithm::starts_with(opt, "scale=")) {
+ scale = float(std::stod(opt.substr(6)));
+ continue;
+ } else if (opt == "hide_routing") {
+ noroute = true;
+ } else if (opt == "hide_inactive") {
+ hide_inactive = true;
+ } else {
+ log_error("Unknown SVG option '%s'\n", opt.c_str());
+ }
+ }
+ float max_x = 0, max_y = 0;
+ for (auto group : ctx->getGroups()) {
+ auto decal = ctx->getGroupDecal(group);
+ for (auto el : ctx->getDecalGraphics(decal.decal)) {
+ max_x = std::max(max_x, decal.x + el.x1 + 1);
+ max_y = std::max(max_y, decal.y + el.y1 + 1);
+ }
+ }
+ for (auto bel : ctx->getBels()) {
+ auto decal = ctx->getBelDecal(bel);
+ for (auto el : ctx->getDecalGraphics(decal.decal)) {
+ max_x = std::max(max_x, decal.x + el.x1 + 1);
+ max_y = std::max(max_y, decal.y + el.y1 + 1);
+ }
+ }
+ for (auto wire : ctx->getWires()) {
+ auto decal = ctx->getWireDecal(wire);
+ for (auto el : ctx->getDecalGraphics(decal.decal)) {
+ max_x = std::max(max_x, decal.x + el.x1 + 1);
+ max_y = std::max(max_y, decal.y + el.y1 + 1);
+ }
+ }
+ for (auto pip : ctx->getPips()) {
+ auto decal = ctx->getPipDecal(pip);
+ for (auto el : ctx->getDecalGraphics(decal.decal)) {
+ max_x = std::max(max_x, decal.x + el.x1 + 1);
+ max_y = std::max(max_y, decal.y + el.y1 + 1);
+ }
+ }
+ out << "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" << std::endl;
+ out << stringf("<svg viewBox=\"0 0 %f %f\" width=\"%f\" height=\"%f\" xmlns=\"http://www.w3.org/2000/svg\">",
+ max_x * scale, max_y * scale, max_x * scale, max_y * scale)
+ << std::endl;
+ out << "<rect x=\"0\" y=\"0\" width=\"100%\" height=\"100%\" stroke=\"#fff\" fill=\"#fff\"/>" << std::endl;
+ for (auto group : ctx->getGroups())
+ write_decal(ctx->getGroupDecal(group));
+ for (auto bel : ctx->getBels())
+ write_decal(ctx->getBelDecal(bel));
+ if (!noroute) {
+ for (auto wire : ctx->getWires())
+ write_decal(ctx->getWireDecal(wire));
+ for (auto pip : ctx->getPips())
+ write_decal(ctx->getPipDecal(pip));
+ }
+ out << "</svg>" << std::endl;
+ }
+};
+} // namespace
+
+void Context::writeSVG(const std::string &filename, const std::string &flags) const
+{
+ std::ofstream out(filename);
+ SVGWriter(this, out)(flags);
+}
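+
+// Usage sketch (editorial illustration): the flags argument is a space-separated
+// option list (parsed above), e.g.
+//
+//   ctx->writeSVG("floorplan.svg", "scale=100 hide_routing hide_inactive");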
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/timing.cc b/common/kernel/timing.cc
new file mode 100644
index 00000000..834785fb
--- /dev/null
+++ b/common/kernel/timing.cc
@@ -0,0 +1,1515 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 gatecat <gatecat@ds0.me>
+ * Copyright (C) 2018 Eddie Hung <eddieh@ece.ubc.ca>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "timing.h"
+#include <algorithm>
+#include <boost/range/adaptor/reversed.hpp>
+#include <deque>
+#include <map>
+#include <utility>
+#include "log.h"
+#include "util.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+void TimingAnalyser::setup()
+{
+ init_ports();
+ get_cell_delays();
+ topo_sort();
+ setup_port_domains();
+ run();
+}
+
+void TimingAnalyser::run(bool update_route_delays)
+{
+ reset_times();
+ if (update_route_delays)
+ get_route_delays();
+ walk_forward();
+ walk_backward();
+ compute_slack();
+ compute_criticality();
+}
+
+void TimingAnalyser::init_ports()
+{
+ // Per cell port structures
+ for (auto &cell : ctx->cells) {
+ CellInfo *ci = cell.second.get();
+ for (auto &port : ci->ports) {
+ auto &data = ports[CellPortKey(ci->name, port.first)];
+ data.type = port.second.type;
+ data.cell_port = CellPortKey(ci->name, port.first);
+ }
+ }
+}
+
+void TimingAnalyser::get_cell_delays()
+{
+ for (auto &port : ports) {
+ CellInfo *ci = cell_info(port.first);
+ auto &pi = port_info(port.first);
+ auto &pd = port.second;
+
+ IdString name = port.first.port;
+ // Ignore dangling ports altogether for timing purposes
+ if (!pi.net)
+ continue;
+ pd.cell_arcs.clear();
+ int clkInfoCount = 0;
+ TimingPortClass cls = ctx->getPortTimingClass(ci, name, clkInfoCount);
+ if (cls == TMG_STARTPOINT || cls == TMG_ENDPOINT || cls == TMG_CLOCK_INPUT || cls == TMG_GEN_CLOCK ||
+ cls == TMG_IGNORE)
+ continue;
+ if (pi.type == PORT_IN) {
+ // Input ports might have setup/hold relationships
+ if (cls == TMG_REGISTER_INPUT) {
+ for (int i = 0; i < clkInfoCount; i++) {
+ auto info = ctx->getPortClockingInfo(ci, name, i);
+ if (!ci->ports.count(info.clock_port) || ci->ports.at(info.clock_port).net == nullptr)
+ continue;
+ pd.cell_arcs.emplace_back(CellArc::SETUP, info.clock_port, DelayQuad(info.setup, info.setup),
+ info.edge);
+ pd.cell_arcs.emplace_back(CellArc::HOLD, info.clock_port, DelayQuad(info.hold, info.hold),
+ info.edge);
+ }
+ }
+ // Combinational delays through cell
+ for (auto &other_port : ci->ports) {
+ auto &op = other_port.second;
+ // ignore dangling ports and non-outputs
+ if (op.net == nullptr || op.type != PORT_OUT)
+ continue;
+ DelayQuad delay;
+ bool is_path = ctx->getCellDelay(ci, name, other_port.first, delay);
+ if (is_path)
+ pd.cell_arcs.emplace_back(CellArc::COMBINATIONAL, other_port.first, delay);
+ }
+ } else if (pi.type == PORT_OUT) {
+ // Output ports might have clk-to-q relationships
+ if (cls == TMG_REGISTER_OUTPUT) {
+ for (int i = 0; i < clkInfoCount; i++) {
+ auto info = ctx->getPortClockingInfo(ci, name, i);
+ if (!ci->ports.count(info.clock_port) || ci->ports.at(info.clock_port).net == nullptr)
+ continue;
+ pd.cell_arcs.emplace_back(CellArc::CLK_TO_Q, info.clock_port, info.clockToQ, info.edge);
+ }
+ }
+ // Combinational delays through cell
+ for (auto &other_port : ci->ports) {
+ auto &op = other_port.second;
+ // ignore dangling ports and non-inputs
+ if (op.net == nullptr || op.type != PORT_IN)
+ continue;
+ DelayQuad delay;
+ bool is_path = ctx->getCellDelay(ci, other_port.first, name, delay);
+ if (is_path)
+ pd.cell_arcs.emplace_back(CellArc::COMBINATIONAL, other_port.first, delay);
+ }
+ }
+ }
+}
+
+void TimingAnalyser::get_route_delays()
+{
+ for (auto &net : ctx->nets) {
+ NetInfo *ni = net.second.get();
+ if (ni->driver.cell == nullptr || ni->driver.cell->bel == BelId())
+ continue;
+ for (auto &usr : ni->users) {
+ if (usr.cell->bel == BelId())
+ continue;
+ ports.at(CellPortKey(usr)).route_delay = DelayPair(ctx->getNetinfoRouteDelay(ni, usr));
+ }
+ }
+}
+
+void TimingAnalyser::set_route_delay(CellPortKey port, DelayPair value) { ports.at(port).route_delay = value; }
+
+void TimingAnalyser::topo_sort()
+{
+ TopoSort<CellPortKey> topo;
+ for (auto &port : ports) {
+ auto &pd = port.second;
+ // All ports are nodes
+ topo.node(port.first);
+ if (pd.type == PORT_IN) {
+ // inputs: combinational arcs through the cell are edges
+ for (auto &arc : pd.cell_arcs) {
+ if (arc.type != CellArc::COMBINATIONAL)
+ continue;
+ topo.edge(port.first, CellPortKey(port.first.cell, arc.other_port));
+ }
+ } else if (pd.type == PORT_OUT) {
+ // output: routing arcs are edges
+ const NetInfo *pn = port_info(port.first).net;
+ if (pn != nullptr) {
+ for (auto &usr : pn->users)
+ topo.edge(port.first, CellPortKey(usr));
+ }
+ }
+ }
+ bool no_loops = topo.sort();
+ if (!no_loops && verbose_mode) {
+ log_info("Found %d combinational loops:\n", int(topo.loops.size()));
+ int i = 0;
+ for (auto &loop : topo.loops) {
+ log_info(" loop %d:\n", ++i);
+ for (auto &port : loop) {
+ log_info(" %s.%s (%s)\n", ctx->nameOf(port.cell), ctx->nameOf(port.port),
+ ctx->nameOf(port_info(port).net));
+ }
+ }
+ }
+ have_loops = !no_loops;
+ std::swap(topological_order, topo.sorted);
+}
+
+void TimingAnalyser::setup_port_domains()
+{
+ for (auto &d : domains) {
+ d.startpoints.clear();
+ d.endpoints.clear();
+ }
+ // Go forward through the topological order (domains from the PoV of arrival time)
+ bool first_iter = true;
+ do {
+ updated_domains = false;
+ for (auto port : topological_order) {
+ auto &pd = ports.at(port);
+ auto &pi = port_info(port);
+ if (pi.type == PORT_OUT) {
+ if (first_iter) {
+ for (auto &fanin : pd.cell_arcs) {
+ if (fanin.type != CellArc::CLK_TO_Q)
+ continue;
+ // registered outputs are startpoints
+ auto dom = domain_id(port.cell, fanin.other_port, fanin.edge);
+ // create per-domain data
+ pd.arrival[dom];
+ domains.at(dom).startpoints.emplace_back(port, fanin.other_port);
+ }
+ }
+ // copy domains across routing
+ if (pi.net != nullptr)
+ for (auto &usr : pi.net->users)
+ copy_domains(port, CellPortKey(usr), false);
+ } else {
+ // copy domains from input to output
+ for (auto &fanout : pd.cell_arcs) {
+ if (fanout.type != CellArc::COMBINATIONAL)
+ continue;
+ copy_domains(port, CellPortKey(port.cell, fanout.other_port), false);
+ }
+ }
+ }
+ // Go backward through the topological order (domains from the PoV of required time)
+ for (auto port : reversed_range(topological_order)) {
+ auto &pd = ports.at(port);
+ auto &pi = port_info(port);
+ if (pi.type == PORT_OUT) {
+ // copy domains from output to input
+ for (auto &fanin : pd.cell_arcs) {
+ if (fanin.type != CellArc::COMBINATIONAL)
+ continue;
+ copy_domains(port, CellPortKey(port.cell, fanin.other_port), true);
+ }
+ } else {
+ if (first_iter) {
+ for (auto &fanout : pd.cell_arcs) {
+ if (fanout.type != CellArc::SETUP)
+ continue;
+ // registered inputs are endpoints
+ auto dom = domain_id(port.cell, fanout.other_port, fanout.edge);
+ // create per-domain data
+ pd.required[dom];
+ domains.at(dom).endpoints.emplace_back(port, fanout.other_port);
+ }
+ }
+ // copy port to driver
+ if (pi.net != nullptr && pi.net->driver.cell != nullptr)
+ copy_domains(port, CellPortKey(pi.net->driver), true);
+ }
+ }
+        // Iterate over ports and find domain pairs
+ for (auto port : topological_order) {
+ auto &pd = ports.at(port);
+ for (auto &arr : pd.arrival)
+ for (auto &req : pd.required) {
+ pd.domain_pairs[domain_pair_id(arr.first, req.first)];
+ }
+ }
+ first_iter = false;
+ // If there are loops, repeat the process until a fixed point is reached, as there might be unusual ways to
+ // visit points, which would result in a missing domain key and therefore crash later on
+ } while (have_loops && updated_domains);
+ for (auto &dp : domain_pairs) {
+ auto &launch_data = domains.at(dp.key.launch);
+ auto &capture_data = domains.at(dp.key.capture);
+ if (launch_data.key.clock != capture_data.key.clock)
+ continue;
+ IdString clk = launch_data.key.clock;
+ delay_t period = ctx->getDelayFromNS(1.0e9 / ctx->setting<float>("target_freq"));
+ if (ctx->nets.count(clk)) {
+ NetInfo *clk_net = ctx->nets.at(clk).get();
+ if (clk_net->clkconstr) {
+ period = clk_net->clkconstr->period.minDelay();
+ }
+ }
+ if (launch_data.key.edge != capture_data.key.edge)
+ period /= 2;
+ dp.period = DelayPair(period);
+ }
+}
+
+void TimingAnalyser::reset_times()
+{
+ for (auto &port : ports) {
+ auto do_reset = [&](dict<domain_id_t, ArrivReqTime> &times) {
+ for (auto &t : times) {
+ t.second.value = init_delay;
+ t.second.path_length = 0;
+ t.second.bwd_min = CellPortKey();
+ t.second.bwd_max = CellPortKey();
+ }
+ };
+ do_reset(port.second.arrival);
+ do_reset(port.second.required);
+ for (auto &dp : port.second.domain_pairs) {
+ dp.second.setup_slack = std::numeric_limits<delay_t>::max();
+ dp.second.hold_slack = std::numeric_limits<delay_t>::max();
+ dp.second.max_path_length = 0;
+ dp.second.criticality = 0;
+ dp.second.budget = 0;
+ }
+ port.second.worst_crit = 0;
+ port.second.worst_setup_slack = std::numeric_limits<delay_t>::max();
+ port.second.worst_hold_slack = std::numeric_limits<delay_t>::max();
+ }
+}
+
+void TimingAnalyser::set_arrival_time(CellPortKey target, domain_id_t domain, DelayPair arrival, int path_length,
+ CellPortKey prev)
+{
+ auto &arr = ports.at(target).arrival.at(domain);
+ if (arrival.max_delay > arr.value.max_delay) {
+ arr.value.max_delay = arrival.max_delay;
+ arr.bwd_max = prev;
+ }
+ if (!setup_only && (arrival.min_delay < arr.value.min_delay)) {
+ arr.value.min_delay = arrival.min_delay;
+ arr.bwd_min = prev;
+ }
+ arr.path_length = std::max(arr.path_length, path_length);
+}
+
+void TimingAnalyser::set_required_time(CellPortKey target, domain_id_t domain, DelayPair required, int path_length,
+ CellPortKey prev)
+{
+ auto &req = ports.at(target).required.at(domain);
+ if (required.min_delay < req.value.min_delay) {
+ req.value.min_delay = required.min_delay;
+ req.bwd_min = prev;
+ }
+ if (!setup_only && (required.max_delay > req.value.max_delay)) {
+ req.value.max_delay = required.max_delay;
+ req.bwd_max = prev;
+ }
+ req.path_length = std::max(req.path_length, path_length);
+}
+
+void TimingAnalyser::walk_forward()
+{
+ // Assign initial arrival time to domain startpoints
+ for (domain_id_t dom_id = 0; dom_id < domain_id_t(domains.size()); ++dom_id) {
+ auto &dom = domains.at(dom_id);
+ for (auto &sp : dom.startpoints) {
+ auto &pd = ports.at(sp.first);
+ DelayPair init_arrival(0);
+ CellPortKey clock_key;
+ // TODO: clock routing delay, if analysis of that is enabled
+ if (sp.second != IdString()) {
+ // clocked startpoints have a clock-to-out time
+ for (auto &fanin : pd.cell_arcs) {
+ if (fanin.type == CellArc::CLK_TO_Q && fanin.other_port == sp.second) {
+ init_arrival = init_arrival + fanin.value.delayPair();
+ break;
+ }
+ }
+ clock_key = CellPortKey(sp.first.cell, sp.second);
+ }
+ set_arrival_time(sp.first, dom_id, init_arrival, 1, clock_key);
+ }
+ }
+ // Walk forward in topological order
+ for (auto p : topological_order) {
+ auto &pd = ports.at(p);
+ for (auto &arr : pd.arrival) {
+ if (pd.type == PORT_OUT) {
+ // Output port: propagate delay through net, adding route delay
+ NetInfo *net = port_info(p).net;
+ if (net != nullptr)
+ for (auto &usr : net->users) {
+ CellPortKey usr_key(usr);
+ auto &usr_pd = ports.at(usr_key);
+ set_arrival_time(usr_key, arr.first, arr.second.value + usr_pd.route_delay,
+ arr.second.path_length, p);
+ }
+ } else if (pd.type == PORT_IN) {
+ // Input port; propagate delay through cell, adding combinational delay
+ for (auto &fanout : pd.cell_arcs) {
+ if (fanout.type != CellArc::COMBINATIONAL)
+ continue;
+ set_arrival_time(CellPortKey(p.cell, fanout.other_port), arr.first,
+ arr.second.value + fanout.value.delayPair(), arr.second.path_length + 1, p);
+ }
+ }
+ }
+ }
+}
+
+void TimingAnalyser::walk_backward()
+{
+ // Assign initial required time to domain endpoints
+ // Note that the clock frequency will be considered later in the analysis; for now, all required times are
+ // normalised to 0ns
+ for (domain_id_t dom_id = 0; dom_id < domain_id_t(domains.size()); ++dom_id) {
+ auto &dom = domains.at(dom_id);
+ for (auto &ep : dom.endpoints) {
+ auto &pd = ports.at(ep.first);
+ DelayPair init_setuphold(0);
+ CellPortKey clock_key;
+ // TODO: clock routing delay, if analysis of that is enabled
+ if (ep.second != IdString()) {
+ // Add setup/hold time, if this endpoint is clocked
+ for (auto &fanin : pd.cell_arcs) {
+ if (fanin.type == CellArc::SETUP && fanin.other_port == ep.second)
+ init_setuphold.min_delay -= fanin.value.maxDelay();
+ if (fanin.type == CellArc::HOLD && fanin.other_port == ep.second)
+ init_setuphold.max_delay -= fanin.value.maxDelay();
+ }
+ clock_key = CellPortKey(ep.first.cell, ep.second);
+ }
+ set_required_time(ep.first, dom_id, init_setuphold, 1, clock_key);
+ }
+ }
+ // Walk backwards in topological order
+ for (auto p : reversed_range(topological_order)) {
+ auto &pd = ports.at(p);
+ for (auto &req : pd.required) {
+ if (pd.type == PORT_IN) {
+ // Input port: propagate delay back through net, subtracting route delay
+ NetInfo *net = port_info(p).net;
+ if (net != nullptr && net->driver.cell != nullptr)
+ set_required_time(CellPortKey(net->driver), req.first,
+ req.second.value - DelayPair(pd.route_delay.maxDelay()), req.second.path_length,
+ p);
+ } else if (pd.type == PORT_OUT) {
+ // Output port : propagate delay back through cell, subtracting combinational delay
+ for (auto &fanin : pd.cell_arcs) {
+ if (fanin.type != CellArc::COMBINATIONAL)
+ continue;
+ set_required_time(CellPortKey(p.cell, fanin.other_port), req.first,
+ req.second.value - DelayPair(fanin.value.maxDelay()), req.second.path_length + 1,
+ p);
+ }
+ }
+ }
+ }
+}
+
+void TimingAnalyser::print_fmax()
+{
+ // Temporary testing code for comparison only
+ dict<int, double> domain_fmax;
+ for (auto p : topological_order) {
+ auto &pd = ports.at(p);
+ for (auto &req : pd.required) {
+ if (pd.arrival.count(req.first)) {
+ auto &arr = pd.arrival.at(req.first);
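+ // with required times normalised to 0ns, (arrival - required) here is the full start-to-end path delay
+ // including setup, hence Fmax = 1000 / delay_ns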
+ double fmax = 1000.0 / ctx->getDelayNS(arr.value.maxDelay() - req.second.value.minDelay());
+ if (!domain_fmax.count(req.first) || domain_fmax.at(req.first) > fmax)
+ domain_fmax[req.first] = fmax;
+ }
+ }
+ }
+ for (auto &fm : domain_fmax) {
+ log_info("Domain %s Worst Fmax %.02f\n", ctx->nameOf(domains.at(fm.first).key.clock), fm.second);
+ }
+}
+
+void TimingAnalyser::compute_slack()
+{
+ for (auto &dp : domain_pairs) {
+ dp.worst_setup_slack = std::numeric_limits<delay_t>::max();
+ dp.worst_hold_slack = std::numeric_limits<delay_t>::max();
+ }
+ for (auto p : topological_order) {
+ auto &pd = ports.at(p);
+ for (auto &pdp : pd.domain_pairs) {
+ auto &dp = domain_pairs.at(pdp.first);
+ auto &arr = pd.arrival.at(dp.key.launch);
+ auto &req = pd.required.at(dp.key.capture);
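+ // required times were normalised to 0ns in walk_backward, so this raw slack excludes the clock period;
+ // the period is only added in below, when the launch and capture domains are the same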
+ pdp.second.setup_slack = 0 - (arr.value.maxDelay() - req.value.minDelay());
+ if (!setup_only)
+ pdp.second.hold_slack = arr.value.minDelay() - req.value.maxDelay();
+ pdp.second.max_path_length = arr.path_length + req.path_length;
+ if (dp.key.launch == dp.key.capture)
+ pd.worst_setup_slack = std::min(pd.worst_setup_slack, dp.period.minDelay() + pdp.second.setup_slack);
+ dp.worst_setup_slack = std::min(dp.worst_setup_slack, pdp.second.setup_slack);
+ if (!setup_only) {
+ pd.worst_hold_slack = std::min(pd.worst_hold_slack, pdp.second.hold_slack);
+ dp.worst_hold_slack = std::min(dp.worst_hold_slack, pdp.second.hold_slack);
+ }
+ }
+ }
+}
+
+void TimingAnalyser::compute_criticality()
+{
+ for (auto p : topological_order) {
+ auto &pd = ports.at(p);
+ for (auto &pdp : pd.domain_pairs) {
+ auto &dp = domain_pairs.at(pdp.first);
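+ // criticality scales linearly from 1.0 at the worst setup slack in this domain pair down to 0.0 at
+ // zero slack, then is clamped to [0, 1]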
+ float crit =
+ 1.0f - (float(pdp.second.setup_slack) - float(dp.worst_setup_slack)) / float(-dp.worst_setup_slack);
+ crit = std::min(crit, 1.0f);
+ crit = std::max(crit, 0.0f);
+ pdp.second.criticality = crit;
+ pd.worst_crit = std::max(pd.worst_crit, crit);
+ }
+ }
+}
+
+std::vector<CellPortKey> TimingAnalyser::get_failing_eps(domain_id_t domain_pair, int count)
+{
+ std::vector<CellPortKey> failing_eps;
+ delay_t last_slack = std::numeric_limits<delay_t>::min();
+ auto &dp = domain_pairs.at(domain_pair);
+ auto &cap_d = domains.at(dp.key.capture);
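+ // repeatedly scan the capture domain's endpoints for the next-worst setup slack above the last one
+ // reported; a simple selection that is adequate for the small counts used in reporting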
+ while (int(failing_eps.size()) < count) {
+ CellPortKey next;
+ delay_t next_slack = std::numeric_limits<delay_t>::max();
+ for (auto ep : cap_d.endpoints) {
+ auto &pd = ports.at(ep.first);
+ if (!pd.domain_pairs.count(domain_pair))
+ continue;
+ delay_t ep_slack = pd.domain_pairs.at(domain_pair).setup_slack;
+ if (ep_slack < next_slack && ep_slack > last_slack) {
+ next = ep.first;
+ next_slack = ep_slack;
+ }
+ }
+ if (next == CellPortKey())
+ break;
+ failing_eps.push_back(next);
+ last_slack = next_slack;
+ }
+ return failing_eps;
+}
+
+void TimingAnalyser::print_critical_path(CellPortKey endpoint, domain_id_t domain_pair)
+{
+ CellPortKey cursor = endpoint;
+ auto &dp = domain_pairs.at(domain_pair);
+ log(" endpoint %s.%s (slack %.02fns):\n", ctx->nameOf(cursor.cell), ctx->nameOf(cursor.port),
+ ctx->getDelayNS(ports.at(cursor).domain_pairs.at(domain_pair).setup_slack));
+ while (cursor != CellPortKey()) {
+ log(" %s.%s (net %s)\n", ctx->nameOf(cursor.cell), ctx->nameOf(cursor.port),
+ ctx->nameOf(get_net_or_empty(ctx->cells.at(cursor.cell).get(), cursor.port)));
+ if (!ports.at(cursor).arrival.count(dp.key.launch))
+ break;
+ cursor = ports.at(cursor).arrival.at(dp.key.launch).bwd_max;
+ }
+}
+
+namespace {
+const char *edge_name(ClockEdge edge) { return (edge == FALLING_EDGE) ? "negedge" : "posedge"; }
+} // namespace
+
+void TimingAnalyser::print_report()
+{
+ for (int i = 0; i < int(domain_pairs.size()); i++) {
+ auto &dp = domain_pairs.at(i);
+ auto &launch = domains.at(dp.key.launch);
+ auto &capture = domains.at(dp.key.capture);
+ log("Worst endpoints for %s %s -> %s %s\n", edge_name(launch.key.edge), ctx->nameOf(launch.key.clock),
+ edge_name(capture.key.edge), ctx->nameOf(capture.key.clock));
+ auto failing_eps = get_failing_eps(i, 5);
+ for (auto &ep : failing_eps)
+ print_critical_path(ep, i);
+ log_break();
+ }
+}
+
+domain_id_t TimingAnalyser::domain_id(IdString cell, IdString clock_port, ClockEdge edge)
+{
+ return domain_id(ctx->cells.at(cell)->ports.at(clock_port).net, edge);
+}
+domain_id_t TimingAnalyser::domain_id(const NetInfo *net, ClockEdge edge)
+{
+ NPNR_ASSERT(net != nullptr);
+ ClockDomainKey key{net->name, edge};
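+ // intern the (clock net, edge) key: emplace returns the existing ID if the key is already known,
+ // otherwise a new domain record is appended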
+ auto inserted = domain_to_id.emplace(key, domains.size());
+ if (inserted.second) {
+ domains.emplace_back(key);
+ }
+ return inserted.first->second;
+}
+domain_id_t TimingAnalyser::domain_pair_id(domain_id_t launch, domain_id_t capture)
+{
+ ClockDomainPairKey key{launch, capture};
+ auto inserted = pair_to_id.emplace(key, domain_pairs.size());
+ if (inserted.second) {
+ domain_pairs.emplace_back(key);
+ }
+ return inserted.first->second;
+}
+
+void TimingAnalyser::copy_domains(const CellPortKey &from, const CellPortKey &to, bool backward)
+{
+ auto &f = ports.at(from), &t = ports.at(to);
+ for (auto &dom : (backward ? f.required : f.arrival)) {
+ updated_domains |= (backward ? t.required : t.arrival).emplace(dom.first, ArrivReqTime{}).second;
+ }
+}
+
+CellInfo *TimingAnalyser::cell_info(const CellPortKey &key) { return ctx->cells.at(key.cell).get(); }
+
+PortInfo &TimingAnalyser::port_info(const CellPortKey &key) { return ctx->cells.at(key.cell)->ports.at(key.port); }
+
+/** LEGACY CODE BEGIN **/
+
+typedef std::vector<const PortRef *> PortRefVector;
+typedef std::map<int, unsigned> DelayFrequency;
+
+struct CriticalPathData
+{
+ PortRefVector ports;
+ delay_t path_delay;
+ delay_t path_period;
+};
+
+typedef dict<ClockPair, CriticalPathData> CriticalPathDataMap;
+
+typedef dict<IdString, std::vector<NetSinkTiming>> DetailedNetTimings;
+
+struct Timing
+{
+ Context *ctx;
+ bool net_delays;
+ bool update;
+ delay_t min_slack;
+ CriticalPathDataMap *crit_path;
+ DelayFrequency *slack_histogram;
+ DetailedNetTimings *detailed_net_timings;
+ IdString async_clock;
+
+ struct TimingData
+ {
+ TimingData() : max_arrival(), max_path_length(), min_remaining_budget() {}
+ TimingData(delay_t max_arrival) : max_arrival(max_arrival), max_path_length(), min_remaining_budget() {}
+ delay_t max_arrival;
+ unsigned max_path_length = 0;
+ delay_t min_remaining_budget;
+ bool false_startpoint = false;
+ std::vector<delay_t> min_required;
+ dict<ClockEvent, delay_t> arrival_time;
+ };
+
+ Timing(Context *ctx, bool net_delays, bool update, CriticalPathDataMap *crit_path = nullptr,
+ DelayFrequency *slack_histogram = nullptr, DetailedNetTimings *detailed_net_timings = nullptr)
+ : ctx(ctx), net_delays(net_delays), update(update), min_slack(1.0e12 / ctx->setting<float>("target_freq")),
+ crit_path(crit_path), slack_histogram(slack_histogram), detailed_net_timings(detailed_net_timings),
+ async_clock(ctx->id("$async$"))
+ {
+ }
+
+ delay_t walk_paths()
+ {
+ const auto clk_period = ctx->getDelayFromNS(1.0e9 / ctx->setting<float>("target_freq"));
+
+ // First, compute the topological order of nets to walk through the circuit, assuming it is an _acyclic_ graph
+ // TODO(eddieh): Handle the case where it is cyclic, e.g. combinatorial loops
+ std::vector<NetInfo *> topological_order;
+ dict<const NetInfo *, dict<ClockEvent, TimingData>, hash_ptr_ops> net_data;
+ // In lieu of deleting edges from the graph, simply count the number of fanins to each output port
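+ // (a Kahn-style topological sort: an output port becomes ready to visit once all of its counted fanins
+ // have been visited and its count drops to zero)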
+ dict<const PortInfo *, unsigned, hash_ptr_ops> port_fanin;
+
+ std::vector<IdString> input_ports;
+ std::vector<const PortInfo *> output_ports;
+
+ pool<IdString> ooc_port_nets;
+
+ // In out-of-context mode, top-level inputs look floating but aren't
+ if (bool_or_default(ctx->settings, ctx->id("arch.ooc"))) {
+ for (auto &p : ctx->ports) {
+ if (p.second.type != PORT_IN || p.second.net == nullptr)
+ continue;
+ ooc_port_nets.insert(p.second.net->name);
+ }
+ }
+
+ for (auto &cell : ctx->cells) {
+ input_ports.clear();
+ output_ports.clear();
+ for (auto &port : cell.second->ports) {
+ if (!port.second.net)
+ continue;
+ if (port.second.type == PORT_OUT)
+ output_ports.push_back(&port.second);
+ else
+ input_ports.push_back(port.first);
+ }
+
+ for (auto o : output_ports) {
+ int clocks = 0;
+ TimingPortClass portClass = ctx->getPortTimingClass(cell.second.get(), o->name, clocks);
+ // If output port is influenced by a clock (e.g. FF output) then add it to the ordering as a timing
+ // start-point
+ if (portClass == TMG_REGISTER_OUTPUT) {
+ topological_order.emplace_back(o->net);
+ for (int i = 0; i < clocks; i++) {
+ TimingClockingInfo clkInfo = ctx->getPortClockingInfo(cell.second.get(), o->name, i);
+ const NetInfo *clknet = get_net_or_empty(cell.second.get(), clkInfo.clock_port);
+ IdString clksig = clknet ? clknet->name : async_clock;
+ net_data[o->net][ClockEvent{clksig, clknet ? clkInfo.edge : RISING_EDGE}] =
+ TimingData{clkInfo.clockToQ.maxDelay()};
+ }
+
+ } else {
+ if (portClass == TMG_STARTPOINT || portClass == TMG_GEN_CLOCK || portClass == TMG_IGNORE) {
+ topological_order.emplace_back(o->net);
+ TimingData td;
+ td.false_startpoint = (portClass == TMG_GEN_CLOCK || portClass == TMG_IGNORE);
+ td.max_arrival = 0;
+ net_data[o->net][ClockEvent{async_clock, RISING_EDGE}] = td;
+ }
+
+ // Don't analyse paths from a clock input to other pins - they will be considered by the
+ // special-case handling for register input/output class ports
+ if (portClass == TMG_CLOCK_INPUT)
+ continue;
+
+ // Otherwise, for all driven input ports on this cell, if a timing arc exists between the input and
+ // the current output port, increment fanin counter
+ for (auto i : input_ports) {
+ DelayQuad comb_delay;
+ NetInfo *i_net = cell.second->ports[i].net;
+ if (i_net->driver.cell == nullptr && !ooc_port_nets.count(i_net->name))
+ continue;
+ bool is_path = ctx->getCellDelay(cell.second.get(), i, o->name, comb_delay);
+ if (is_path)
+ port_fanin[o]++;
+ }
+ // If there is no fanin, add the port as a false startpoint
+ if (!port_fanin.count(o) && !net_data.count(o->net)) {
+ topological_order.emplace_back(o->net);
+ TimingData td;
+ td.false_startpoint = true;
+ td.max_arrival = 0;
+ net_data[o->net][ClockEvent{async_clock, RISING_EDGE}] = td;
+ }
+ }
+ }
+ }
+
+ // In out-of-context mode, handle top-level ports correctly
+ if (bool_or_default(ctx->settings, ctx->id("arch.ooc"))) {
+ for (auto &p : ctx->ports) {
+ if (p.second.type != PORT_IN || p.second.net == nullptr)
+ continue;
+ topological_order.emplace_back(p.second.net);
+ }
+ }
+
+ std::deque<NetInfo *> queue(topological_order.begin(), topological_order.end());
+ // Now walk the design, from the start points identified previously, building up a topological order
+ while (!queue.empty()) {
+ const auto net = queue.front();
+ queue.pop_front();
+
+ for (auto &usr : net->users) {
+ int user_clocks;
+ TimingPortClass usrClass = ctx->getPortTimingClass(usr.cell, usr.port, user_clocks);
+ if (usrClass == TMG_IGNORE || usrClass == TMG_CLOCK_INPUT)
+ continue;
+ for (auto &port : usr.cell->ports) {
+ if (port.second.type != PORT_OUT || !port.second.net)
+ continue;
+ int port_clocks;
+ TimingPortClass portClass = ctx->getPortTimingClass(usr.cell, port.first, port_clocks);
+
+ // Skip if this is a clocked output (but allow non-clocked ones)
+ if (portClass == TMG_REGISTER_OUTPUT || portClass == TMG_STARTPOINT || portClass == TMG_IGNORE ||
+ portClass == TMG_GEN_CLOCK)
+ continue;
+ DelayQuad comb_delay;
+ bool is_path = ctx->getCellDelay(usr.cell, usr.port, port.first, comb_delay);
+ if (!is_path)
+ continue;
+ // Decrement the fanin count, and only add to topological order if all its fanins have already
+ // been visited
+ auto it = port_fanin.find(&port.second);
+ if (it == port_fanin.end())
+ log_error("Timing counted negative fanin count for port %s.%s (net %s), please report this "
+ "error.\n",
+ ctx->nameOf(usr.cell), ctx->nameOf(port.first), ctx->nameOf(port.second.net));
+ if (--it->second == 0) {
+ topological_order.emplace_back(port.second.net);
+ queue.emplace_back(port.second.net);
+ port_fanin.erase(it);
+ }
+ }
+ }
+ }
+
+ // Sanity check to ensure that all ports where fanins were recorded were indeed visited
+ if (!port_fanin.empty() && !bool_or_default(ctx->settings, ctx->id("timing/ignoreLoops"), false)) {
+ for (auto fanin : port_fanin) {
+ NetInfo *net = fanin.first->net;
+ if (net != nullptr) {
+ log_info(" remaining fanin includes %s (net %s)\n", fanin.first->name.c_str(ctx),
+ net->name.c_str(ctx));
+ if (net->driver.cell != nullptr)
+ log_info(" driver = %s.%s\n", net->driver.cell->name.c_str(ctx),
+ net->driver.port.c_str(ctx));
+ for (auto net_user : net->users)
+ log_info(" user: %s.%s\n", net_user.cell->name.c_str(ctx), net_user.port.c_str(ctx));
+ } else {
+ log_info(" remaining fanin includes %s (no net)\n", fanin.first->name.c_str(ctx));
+ }
+ }
+ if (ctx->force)
+ log_warning("timing analysis failed due to presence of combinatorial loops, incomplete specification "
+ "of timing ports, etc.\n");
+ else
+ log_error("timing analysis failed due to presence of combinatorial loops, incomplete specification of "
+ "timing ports, etc.\n");
+ }
+
+ // Go forwards topologically to find the maximum arrival time and max path length for each net
+ std::vector<ClockEvent> startdomains;
+ for (auto net : topological_order) {
+ if (!net_data.count(net))
+ continue;
+ // Updates later on might invalidate a reference taken here to net_data, so iterate over a list of domains
+ // instead
+ startdomains.clear();
+ {
+ auto &nd_map = net_data.at(net);
+ for (auto &startdomain : nd_map)
+ startdomains.push_back(startdomain.first);
+ }
+ for (auto &start_clk : startdomains) {
+ auto &nd = net_data.at(net).at(start_clk);
+ if (nd.false_startpoint)
+ continue;
+ const auto net_arrival = nd.max_arrival;
+ const auto net_length_plus_one = nd.max_path_length + 1;
+ nd.min_remaining_budget = clk_period;
+ for (auto &usr : net->users) {
+ int port_clocks;
+ TimingPortClass portClass = ctx->getPortTimingClass(usr.cell, usr.port, port_clocks);
+ auto net_delay = net_delays ? ctx->getNetinfoRouteDelay(net, usr) : delay_t();
+ auto usr_arrival = net_arrival + net_delay;
+
+ if (portClass == TMG_ENDPOINT || portClass == TMG_IGNORE || portClass == TMG_CLOCK_INPUT) {
+ // Skip
+ } else {
+ auto budget_override = ctx->getBudgetOverride(net, usr, net_delay);
+ // Iterate over all output ports on the same cell as the sink
+ for (auto port : usr.cell->ports) {
+ if (port.second.type != PORT_OUT || !port.second.net)
+ continue;
+ DelayQuad comb_delay;
+ // Look up delay through this path
+ bool is_path = ctx->getCellDelay(usr.cell, usr.port, port.first, comb_delay);
+ if (!is_path)
+ continue;
+ auto &data = net_data[port.second.net][start_clk];
+ auto &arrival = data.max_arrival;
+ arrival = std::max(arrival, usr_arrival + comb_delay.maxDelay());
+ if (!budget_override) {
+ // Do not increment path length if budget overridden since it
+ // doesn't require a share of the slack
+ auto &path_length = data.max_path_length;
+ path_length = std::max(path_length, net_length_plus_one);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ dict<ClockPair, std::pair<delay_t, NetInfo *>> crit_nets;
+
+ // Now go backwards topologically to determine the minimum path slack, and to distribute all path slack evenly
+ // between all nets on the path
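+ // (each sink's budget becomes its net delay plus an equal share, path_budget / path length, of the slack)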
+ for (auto net : boost::adaptors::reverse(topological_order)) {
+ if (!net_data.count(net))
+ continue;
+ auto &nd_map = net_data.at(net);
+ for (auto &startdomain : nd_map) {
+ auto &nd = startdomain.second;
+ // Ignore false startpoints
+ if (nd.false_startpoint)
+ continue;
+ const delay_t net_length_plus_one = nd.max_path_length + 1;
+ auto &net_min_remaining_budget = nd.min_remaining_budget;
+ for (auto &usr : net->users) {
+ auto net_delay = net_delays ? ctx->getNetinfoRouteDelay(net, usr) : delay_t();
+ auto budget_override = ctx->getBudgetOverride(net, usr, net_delay);
+ int port_clocks;
+ TimingPortClass portClass = ctx->getPortTimingClass(usr.cell, usr.port, port_clocks);
+ if (portClass == TMG_REGISTER_INPUT || portClass == TMG_ENDPOINT) {
+ auto process_endpoint = [&](IdString clksig, ClockEdge edge, delay_t setup) {
+ const auto net_arrival = nd.max_arrival;
+ const auto endpoint_arrival = net_arrival + net_delay + setup;
+ delay_t period;
+ // Set default period
+ if (edge == startdomain.first.edge) {
+ period = clk_period;
+ } else {
+ period = clk_period / 2;
+ }
+ if (clksig != async_clock) {
+ if (ctx->nets.at(clksig)->clkconstr) {
+ if (edge == startdomain.first.edge) {
+ // same edge
+ period = ctx->nets.at(clksig)->clkconstr->period.minDelay();
+ } else if (edge == RISING_EDGE) {
+ // falling -> rising
+ period = ctx->nets.at(clksig)->clkconstr->low.minDelay();
+ } else if (edge == FALLING_EDGE) {
+ // rising -> falling
+ period = ctx->nets.at(clksig)->clkconstr->high.minDelay();
+ }
+ }
+ }
+ auto path_budget = period - endpoint_arrival;
+
+ if (update) {
+ auto budget_share = budget_override ? 0 : path_budget / net_length_plus_one;
+ usr.budget = std::min(usr.budget, net_delay + budget_share);
+ net_min_remaining_budget =
+ std::min(net_min_remaining_budget, path_budget - budget_share);
+ }
+
+ if (path_budget < min_slack)
+ min_slack = path_budget;
+
+ if (slack_histogram) {
+ int slack_ps = ctx->getDelayNS(path_budget) * 1000;
+ (*slack_histogram)[slack_ps]++;
+ }
+ ClockEvent dest_ev{clksig, edge};
+ ClockPair clockPair{startdomain.first, dest_ev};
+ nd.arrival_time[dest_ev] = std::max(nd.arrival_time[dest_ev], endpoint_arrival);
+
+ // Store the detailed timing for each net and user (a.k.a. sink)
+ if (detailed_net_timings) {
+ NetSinkTiming sink_timing;
+ sink_timing.clock_pair = clockPair;
+ sink_timing.cell_port = std::make_pair(usr.cell->name, usr.port);
+ sink_timing.delay = endpoint_arrival;
+ sink_timing.budget = period;
+
+ (*detailed_net_timings)[net->name].push_back(sink_timing);
+ }
+
+ if (crit_path) {
+ if (!crit_nets.count(clockPair) || crit_nets.at(clockPair).first < endpoint_arrival) {
+ crit_nets[clockPair] = std::make_pair(endpoint_arrival, net);
+ (*crit_path)[clockPair].path_delay = endpoint_arrival;
+ (*crit_path)[clockPair].path_period = period;
+ (*crit_path)[clockPair].ports.clear();
+ (*crit_path)[clockPair].ports.push_back(&usr);
+ }
+ }
+ };
+ if (portClass == TMG_REGISTER_INPUT) {
+ for (int i = 0; i < port_clocks; i++) {
+ TimingClockingInfo clkInfo = ctx->getPortClockingInfo(usr.cell, usr.port, i);
+ const NetInfo *clknet = get_net_or_empty(usr.cell, clkInfo.clock_port);
+ IdString clksig = clknet ? clknet->name : async_clock;
+ process_endpoint(clksig, clknet ? clkInfo.edge : RISING_EDGE, clkInfo.setup.maxDelay());
+ }
+ } else {
+ process_endpoint(async_clock, RISING_EDGE, 0);
+ }
+
+ } else if (update) {
+
+ // Iterate over all output ports on the same cell as the sink
+ for (const auto &port : usr.cell->ports) {
+ if (port.second.type != PORT_OUT || !port.second.net)
+ continue;
+ DelayQuad comb_delay;
+ bool is_path = ctx->getCellDelay(usr.cell, usr.port, port.first, comb_delay);
+ if (!is_path)
+ continue;
+ if (net_data.count(port.second.net) &&
+ net_data.at(port.second.net).count(startdomain.first)) {
+ auto path_budget =
+ net_data.at(port.second.net).at(startdomain.first).min_remaining_budget;
+ auto budget_share = budget_override ? 0 : path_budget / net_length_plus_one;
+ usr.budget = std::min(usr.budget, net_delay + budget_share);
+ net_min_remaining_budget =
+ std::min(net_min_remaining_budget, path_budget - budget_share);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (crit_path) {
+ // Walk backwards from the most critical net
+ for (auto crit_pair : crit_nets) {
+ NetInfo *crit_net = crit_pair.second.second;
+ auto &cp_ports = (*crit_path)[crit_pair.first].ports;
+ while (crit_net) {
+ const PortInfo *crit_ipin = nullptr;
+ delay_t max_arrival = std::numeric_limits<delay_t>::min();
+ // Look at all input ports on its driving cell
+ for (const auto &port : crit_net->driver.cell->ports) {
+ if (port.second.type != PORT_IN || !port.second.net)
+ continue;
+ DelayQuad comb_delay;
+ bool is_path =
+ ctx->getCellDelay(crit_net->driver.cell, port.first, crit_net->driver.port, comb_delay);
+ if (!is_path)
+ continue;
+ // If input port is influenced by a clock, skip
+ int port_clocks;
+ TimingPortClass portClass =
+ ctx->getPortTimingClass(crit_net->driver.cell, port.first, port_clocks);
+ if (portClass == TMG_CLOCK_INPUT || portClass == TMG_ENDPOINT || portClass == TMG_IGNORE)
+ continue;
+ // And find the fanin net with the latest arrival time
+ if (net_data.count(port.second.net) &&
+ net_data.at(port.second.net).count(crit_pair.first.start)) {
+ auto net_arrival = net_data.at(port.second.net).at(crit_pair.first.start).max_arrival;
+ if (net_delays) {
+ for (auto &user : port.second.net->users)
+ if (user.port == port.first && user.cell == crit_net->driver.cell) {
+ net_arrival += ctx->getNetinfoRouteDelay(port.second.net, user);
+ break;
+ }
+ }
+ net_arrival += comb_delay.maxDelay();
+ if (net_arrival > max_arrival) {
+ max_arrival = net_arrival;
+ crit_ipin = &port.second;
+ }
+ }
+ }
+
+ if (!crit_ipin)
+ break;
+ // Now convert PortInfo* into a PortRef*
+ for (auto &usr : crit_ipin->net->users) {
+ if (usr.cell->name == crit_net->driver.cell->name && usr.port == crit_ipin->name) {
+ cp_ports.push_back(&usr);
+ break;
+ }
+ }
+ crit_net = crit_ipin->net;
+ }
+ std::reverse(cp_ports.begin(), cp_ports.end());
+ }
+ }
+ return min_slack;
+ }
+
+ void assign_budget()
+ {
+ // Initialise all budgets to a very high value first; walk_paths() then minimises them
+ for (auto &net : ctx->nets) {
+ for (auto &usr : net.second->users) {
+ usr.budget = std::numeric_limits<delay_t>::max();
+ }
+ }
+
+ walk_paths();
+ }
+};
+
+void assign_budget(Context *ctx, bool quiet)
+{
+ if (!quiet) {
+ log_break();
+ log_info("Annotating ports with timing budgets for target frequency %.2f MHz\n",
+ ctx->setting<float>("target_freq") / 1e6);
+ }
+
+ Timing timing(ctx, ctx->setting<int>("slack_redist_iter") > 0 /* net_delays */, true /* update */);
+ timing.assign_budget();
+
+ if (!quiet || ctx->verbose) {
+ for (auto &net : ctx->nets) {
+ for (auto &user : net.second->users) {
+ // Post-update check
+ if (!ctx->setting<bool>("auto_freq") && user.budget < 0)
+ log_info("port %s.%s, connected to net '%s', has negative "
+ "timing budget of %fns\n",
+ user.cell->name.c_str(ctx), user.port.c_str(ctx), net.first.c_str(ctx),
+ ctx->getDelayNS(user.budget));
+ else if (ctx->debug)
+ log_info("port %s.%s, connected to net '%s', has "
+ "timing budget of %fns\n",
+ user.cell->name.c_str(ctx), user.port.c_str(ctx), net.first.c_str(ctx),
+ ctx->getDelayNS(user.budget));
+ }
+ }
+ }
+
+ // For slack redistribution: if the user has not specified a frequency, dynamically adjust the target frequency to
+ // be the currently achieved maximum
+ if (ctx->setting<bool>("auto_freq") && ctx->setting<int>("slack_redist_iter") > 0) {
+ delay_t default_slack = delay_t((1.0e9 / ctx->getDelayNS(1)) / ctx->setting<float>("target_freq"));
+ ctx->settings[ctx->id("target_freq")] =
+ std::to_string(1.0e9 / ctx->getDelayNS(default_slack - timing.min_slack));
+ if (ctx->verbose)
+ log_info("minimum slack for this assign = %.2f ns, target Fmax for next "
+ "update = %.2f MHz\n",
+ ctx->getDelayNS(timing.min_slack), ctx->setting<float>("target_freq") / 1e6);
+ }
+
+ if (!quiet)
+ log_info("Checksum: 0x%08x\n", ctx->checksum());
+}
+
+CriticalPath build_critical_path_report(Context *ctx, ClockPair &clocks, const PortRefVector &crit_path)
+{
+
+ CriticalPath report;
+ report.clock_pair = clocks;
+
+ auto &front = crit_path.front();
+ auto &front_port = front->cell->ports.at(front->port);
+ auto &front_driver = front_port.net->driver;
+
+ int port_clocks;
+ auto portClass = ctx->getPortTimingClass(front_driver.cell, front_driver.port, port_clocks);
+
+ const CellInfo *last_cell = front->cell;
+ IdString last_port = front_driver.port;
+
+ int clock_start = -1;
+ if (portClass == TMG_REGISTER_OUTPUT) {
+ for (int i = 0; i < port_clocks; i++) {
+ TimingClockingInfo clockInfo = ctx->getPortClockingInfo(front_driver.cell, front_driver.port, i);
+ const NetInfo *clknet = get_net_or_empty(front_driver.cell, clockInfo.clock_port);
+ if (clknet != nullptr && clknet->name == clocks.start.clock && clockInfo.edge == clocks.start.edge) {
+ last_port = clockInfo.clock_port;
+ clock_start = i;
+ break;
+ }
+ }
+ }
+
+ for (auto sink : crit_path) {
+ auto sink_cell = sink->cell;
+ auto &port = sink_cell->ports.at(sink->port);
+ auto net = port.net;
+ auto &driver = net->driver;
+ auto driver_cell = driver.cell;
+
+ CriticalPath::Segment seg_logic;
+
+ DelayQuad comb_delay;
+ if (clock_start != -1) {
+ auto clockInfo = ctx->getPortClockingInfo(driver_cell, driver.port, clock_start);
+ comb_delay = clockInfo.clockToQ;
+ clock_start = -1;
+ seg_logic.type = CriticalPath::Segment::Type::CLK_TO_Q;
+ } else if (last_port == driver.port) {
+ // Case where we start with a STARTPOINT etc
+ comb_delay = DelayQuad(0);
+ seg_logic.type = CriticalPath::Segment::Type::SOURCE;
+ } else {
+ ctx->getCellDelay(driver_cell, last_port, driver.port, comb_delay);
+ seg_logic.type = CriticalPath::Segment::Type::LOGIC;
+ }
+
+ seg_logic.delay = comb_delay.maxDelay();
+ seg_logic.budget = 0;
+ seg_logic.from = std::make_pair(last_cell->name, last_port);
+ seg_logic.to = std::make_pair(driver_cell->name, driver.port);
+ seg_logic.net = IdString();
+ report.segments.push_back(seg_logic);
+
+ auto net_delay = ctx->getNetinfoRouteDelay(net, *sink);
+
+ CriticalPath::Segment seg_route;
+ seg_route.type = CriticalPath::Segment::Type::ROUTING;
+ seg_route.delay = net_delay;
+ seg_route.budget = sink->budget;
+ seg_route.from = std::make_pair(driver_cell->name, driver.port);
+ seg_route.to = std::make_pair(sink_cell->name, sink->port);
+ seg_route.net = net->name;
+ report.segments.push_back(seg_route);
+
+ last_cell = sink_cell;
+ last_port = sink->port;
+ }
+
+ int clockCount = 0;
+ auto sinkClass = ctx->getPortTimingClass(crit_path.back()->cell, crit_path.back()->port, clockCount);
+ if (sinkClass == TMG_REGISTER_INPUT && clockCount > 0) {
+ auto sinkClockInfo = ctx->getPortClockingInfo(crit_path.back()->cell, crit_path.back()->port, 0);
+ delay_t setup = sinkClockInfo.setup.maxDelay();
+
+ CriticalPath::Segment seg_logic;
+ seg_logic.type = CriticalPath::Segment::Type::SETUP;
+ seg_logic.delay = setup;
+ seg_logic.budget = 0;
+ seg_logic.from = std::make_pair(last_cell->name, last_port);
+ seg_logic.to = seg_logic.from;
+ seg_logic.net = IdString();
+ report.segments.push_back(seg_logic);
+ }
+
+ return report;
+}
+
+void timing_analysis(Context *ctx, bool print_histogram, bool print_fmax, bool print_path, bool warn_on_failure,
+ bool update_results)
+{
+ auto format_event = [ctx](const ClockEvent &e, int field_width = 0) {
+ std::string value;
+ if (e.clock == ctx->id("$async$"))
+ value = std::string("<async>");
+ else
+ value = (e.edge == FALLING_EDGE ? std::string("negedge ") : std::string("posedge ")) + e.clock.str(ctx);
+ if (int(value.length()) < field_width)
+ value.insert(value.length(), field_width - int(value.length()), ' ');
+ return value;
+ };
+
+ CriticalPathDataMap crit_paths;
+ DelayFrequency slack_histogram;
+ DetailedNetTimings detailed_net_timings;
+
+ Timing timing(ctx, true /* net_delays */, false /* update */, (print_path || print_fmax) ? &crit_paths : nullptr,
+ print_histogram ? &slack_histogram : nullptr,
+ (update_results && ctx->detailed_timing_report) ? &detailed_net_timings : nullptr);
+ timing.walk_paths();
+
+ bool report_critical_paths = print_path || print_fmax || update_results;
+
+ dict<IdString, CriticalPath> clock_reports;
+ std::vector<CriticalPath> xclock_reports;
+ dict<IdString, ClockFmax> clock_fmax;
+ std::set<IdString> empty_clocks; // set of clocks with no interior paths
+
+ if (report_critical_paths) {
+
+ for (auto path : crit_paths) {
+ const ClockEvent &a = path.first.start;
+ const ClockEvent &b = path.first.end;
+ empty_clocks.insert(a.clock);
+ empty_clocks.insert(b.clock);
+ }
+ for (auto path : crit_paths) {
+ const ClockEvent &a = path.first.start;
+ const ClockEvent &b = path.first.end;
+ if (a.clock != b.clock || a.clock == ctx->id("$async$"))
+ continue;
+ double Fmax;
+ empty_clocks.erase(a.clock);
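+ // only half a period is available when launch and capture are on opposite edges, hence 500 rather than 1000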
+ if (a.edge == b.edge)
+ Fmax = 1000 / ctx->getDelayNS(path.second.path_delay);
+ else
+ Fmax = 500 / ctx->getDelayNS(path.second.path_delay);
+ if (!clock_fmax.count(a.clock) || Fmax < clock_fmax.at(a.clock).achieved) {
+ clock_fmax[a.clock].achieved = Fmax;
+ clock_fmax[a.clock].constraint = 0.0f; // Will be filled later
+ clock_reports[a.clock] = build_critical_path_report(ctx, path.first, path.second.ports);
+ clock_reports[a.clock].period = path.second.path_period;
+ }
+ }
+
+ for (auto &path : crit_paths) {
+ const ClockEvent &a = path.first.start;
+ const ClockEvent &b = path.first.end;
+ if (a.clock == b.clock && a.clock != ctx->id("$async$"))
+ continue;
+
+ auto &crit_path = crit_paths.at(path.first).ports;
+ xclock_reports.push_back(build_critical_path_report(ctx, path.first, crit_path));
+ xclock_reports.back().period = path.second.path_period;
+ }
+
+ if (clock_reports.empty()) {
+ log_info("No Fmax available; no interior timing paths found in design.\n");
+ }
+
+ std::sort(xclock_reports.begin(), xclock_reports.end(), [ctx](const CriticalPath &ra, const CriticalPath &rb) {
+ const auto &a = ra.clock_pair;
+ const auto &b = rb.clock_pair;
+
+ if (a.start.clock.str(ctx) < b.start.clock.str(ctx))
+ return true;
+ if (a.start.clock.str(ctx) > b.start.clock.str(ctx))
+ return false;
+ if (a.start.edge < b.start.edge)
+ return true;
+ if (a.start.edge > b.start.edge)
+ return false;
+ if (a.end.clock.str(ctx) < b.end.clock.str(ctx))
+ return true;
+ if (a.end.clock.str(ctx) > b.end.clock.str(ctx))
+ return false;
+ if (a.end.edge < b.end.edge)
+ return true;
+ return false;
+ });
+
+ for (auto &clock : clock_reports) {
+ float target = ctx->setting<float>("target_freq") / 1e6;
+ if (ctx->nets.at(clock.first)->clkconstr)
+ target = 1000 / ctx->getDelayNS(ctx->nets.at(clock.first)->clkconstr->period.minDelay());
+ clock_fmax[clock.first].constraint = target;
+ }
+ }
+
+ // Print critical paths
+ if (print_path) {
+
+ static auto print_net_source = [ctx](const NetInfo *net) {
+ // Check if this net is annotated with a source list
+ auto sources = net->attrs.find(ctx->id("src"));
+ if (sources == net->attrs.end()) {
+ // No sources for this net, can't print anything
+ return;
+ }
+
+ // Sources are separated by pipe characters.
+ // There is no guaranteed ordering on sources, so we just print all
+ auto sourcelist = sources->second.as_string();
+ std::vector<std::string> source_entries;
+ size_t current = 0, prev = 0;
+ while ((current = sourcelist.find("|", prev)) != std::string::npos) {
+ source_entries.emplace_back(sourcelist.substr(prev, current - prev));
+ prev = current + 1;
+ }
+ // Ensure we emplace the final entry
+ source_entries.emplace_back(sourcelist.substr(prev, current - prev));
+
+ // Iterate and print our source list at the correct indentation level
+ log_info(" Defined in:\n");
+ for (auto entry : source_entries) {
+ log_info(" %s\n", entry.c_str());
+ }
+ };
+
+ // A helper function for reporting one critical path
+ auto print_path_report = [ctx](const CriticalPath &path) {
+ delay_t total = 0, logic_total = 0, route_total = 0;
+
+ log_info("curr total\n");
+ for (const auto &segment : path.segments) {
+
+ total += segment.delay;
+
+ if (segment.type == CriticalPath::Segment::Type::CLK_TO_Q ||
+ segment.type == CriticalPath::Segment::Type::SOURCE ||
+ segment.type == CriticalPath::Segment::Type::LOGIC ||
+ segment.type == CriticalPath::Segment::Type::SETUP) {
+ logic_total += segment.delay;
+
+ const std::string type_name =
+ (segment.type == CriticalPath::Segment::Type::SETUP) ? "Setup" : "Source";
+
+ log_info("%4.1f %4.1f %s %s.%s\n", ctx->getDelayNS(segment.delay), ctx->getDelayNS(total),
+ type_name.c_str(), segment.to.first.c_str(ctx), segment.to.second.c_str(ctx));
+ } else if (segment.type == CriticalPath::Segment::Type::ROUTING) {
+ route_total += segment.delay;
+
+ const auto &driver = ctx->cells.at(segment.from.first);
+ const auto &sink = ctx->cells.at(segment.to.first);
+
+ auto driver_loc = ctx->getBelLocation(driver->bel);
+ auto sink_loc = ctx->getBelLocation(sink->bel);
+
+ log_info("%4.1f %4.1f Net %s budget %f ns (%d,%d) -> (%d,%d)\n", ctx->getDelayNS(segment.delay),
+ ctx->getDelayNS(total), segment.net.c_str(ctx), ctx->getDelayNS(segment.budget),
+ driver_loc.x, driver_loc.y, sink_loc.x, sink_loc.y);
+ log_info(" Sink %s.%s\n", segment.to.first.c_str(ctx), segment.to.second.c_str(ctx));
+
+ const NetInfo *net = ctx->nets.at(segment.net).get();
+
+ if (ctx->verbose) {
+
+ PortRef sink_ref;
+ sink_ref.cell = sink.get();
+ sink_ref.port = segment.to.second;
+ sink_ref.budget = segment.budget;
+
+ auto driver_wire = ctx->getNetinfoSourceWire(net);
+ auto sink_wire = ctx->getNetinfoSinkWire(net, sink_ref, 0);
+ log_info(" prediction: %f ns estimate: %f ns\n",
+ ctx->getDelayNS(ctx->predictArcDelay(net, sink_ref)),
+ ctx->getDelayNS(ctx->estimateDelay(driver_wire, sink_wire)));
+ auto cursor = sink_wire;
+ delay_t delay;
+ while (driver_wire != cursor) {
+#ifdef ARCH_ECP5
+ if (net->is_global)
+ break;
+#endif
+ auto it = net->wires.find(cursor);
+ assert(it != net->wires.end());
+ auto pip = it->second.pip;
+ NPNR_ASSERT(pip != PipId());
+ delay = ctx->getPipDelay(pip).maxDelay();
+ log_info(" %1.3f %s\n", ctx->getDelayNS(delay), ctx->nameOfPip(pip));
+ cursor = ctx->getPipSrcWire(pip);
+ }
+ }
+
+ if (!ctx->disable_critical_path_source_print) {
+ print_net_source(net);
+ }
+ }
+ }
+ log_info("%.1f ns logic, %.1f ns routing\n", ctx->getDelayNS(logic_total), ctx->getDelayNS(route_total));
+ };
+
+ // Single domain paths
+ for (auto &clock : clock_reports) {
+ log_break();
+ std::string start = clock.second.clock_pair.start.edge == FALLING_EDGE ? std::string("negedge")
+ : std::string("posedge");
+ std::string end =
+ clock.second.clock_pair.end.edge == FALLING_EDGE ? std::string("negedge") : std::string("posedge");
+ log_info("Critical path report for clock '%s' (%s -> %s):\n", clock.first.c_str(ctx), start.c_str(),
+ end.c_str());
+ auto &report = clock.second;
+ print_path_report(report);
+ }
+
+ // Cross-domain paths
+ for (auto &report : xclock_reports) {
+ log_break();
+ std::string start = format_event(report.clock_pair.start);
+ std::string end = format_event(report.clock_pair.end);
+ log_info("Critical path report for cross-domain path '%s' -> '%s':\n", start.c_str(), end.c_str());
+ print_path_report(report);
+ }
+ }
+
+ if (print_fmax) {
+ log_break();
+
+ unsigned max_width = 0;
+ for (auto &clock : clock_reports)
+ max_width = std::max<unsigned>(max_width, clock.first.str(ctx).size());
+
+ for (auto &clock : clock_reports) {
+ const auto &clock_name = clock.first.str(ctx);
+ const int width = max_width - clock_name.size();
+
+ float fmax = clock_fmax[clock.first].achieved;
+ float target = clock_fmax[clock.first].constraint;
+ bool passed = target < fmax;
+
+ if (!warn_on_failure || passed)
+ log_info("Max frequency for clock %*s'%s': %.02f MHz (%s at %.02f MHz)\n", width, "",
+ clock_name.c_str(), fmax, passed ? "PASS" : "FAIL", target);
+ else if (bool_or_default(ctx->settings, ctx->id("timing/allowFail"), false))
+ log_warning("Max frequency for clock %*s'%s': %.02f MHz (%s at %.02f MHz)\n", width, "",
+ clock_name.c_str(), fmax, passed ? "PASS" : "FAIL", target);
+ else
+ log_nonfatal_error("Max frequency for clock %*s'%s': %.02f MHz (%s at %.02f MHz)\n", width, "",
+ clock_name.c_str(), fmax, passed ? "PASS" : "FAIL", target);
+ }
+ for (auto &eclock : empty_clocks) {
+ if (eclock != ctx->id("$async$"))
+ log_info("Clock '%s' has no interior paths\n", eclock.c_str(ctx));
+ }
+ log_break();
+
+ int start_field_width = 0, end_field_width = 0;
+ for (auto &report : xclock_reports) {
+ start_field_width = std::max((int)format_event(report.clock_pair.start).length(), start_field_width);
+ end_field_width = std::max((int)format_event(report.clock_pair.end).length(), end_field_width);
+ }
+
+ for (auto &report : xclock_reports) {
+ const ClockEvent &a = report.clock_pair.start;
+ const ClockEvent &b = report.clock_pair.end;
+ delay_t path_delay = 0;
+ for (const auto &segment : report.segments) {
+ path_delay += segment.delay;
+ }
+ auto ev_a = format_event(a, start_field_width), ev_b = format_event(b, end_field_width);
+ log_info("Max delay %s -> %s: %0.02f ns\n", ev_a.c_str(), ev_b.c_str(), ctx->getDelayNS(path_delay));
+ }
+ log_break();
+ }
+
+ if (print_histogram && slack_histogram.size() > 0) {
+ unsigned num_bins = 20;
+ unsigned bar_width = 60;
+ auto min_slack = slack_histogram.begin()->first;
+ auto max_slack = slack_histogram.rbegin()->first;
+ auto bin_size = std::max<unsigned>(1, ceil((max_slack - min_slack + 1) / float(num_bins)));
+ std::vector<unsigned> bins(num_bins);
+ unsigned max_freq = 0;
+ for (const auto &i : slack_histogram) {
+ int bin_idx = int((i.first - min_slack) / bin_size);
+ if (bin_idx < 0)
+ bin_idx = 0;
+ else if (bin_idx >= int(num_bins))
+ bin_idx = num_bins - 1;
+ auto &bin = bins.at(bin_idx);
+ bin += i.second;
+ max_freq = std::max(max_freq, bin);
+ }
+ bar_width = std::min(bar_width, max_freq);
+
+ log_break();
+ log_info("Slack histogram:\n");
+ log_info(" legend: * represents %d endpoint(s)\n", max_freq / bar_width);
+ log_info(" + represents [1,%d) endpoint(s)\n", max_freq / bar_width);
+ for (unsigned i = 0; i < num_bins; ++i)
+ log_info("[%6d, %6d) |%s%c\n", min_slack + bin_size * i, min_slack + bin_size * (i + 1),
+ std::string(bins[i] * bar_width / max_freq, '*').c_str(),
+ (bins[i] * bar_width) % max_freq > 0 ? '+' : ' ');
+ }
+
+ // Update timing results in the context
+ if (update_results) {
+ auto &results = ctx->timing_result;
+
+ results.clock_fmax = std::move(clock_fmax);
+ results.clock_paths = std::move(clock_reports);
+ results.xclock_paths = std::move(xclock_reports);
+
+ results.detailed_net_timings = std::move(detailed_net_timings);
+ }
+}
+
+NEXTPNR_NAMESPACE_END
diff --git a/common/kernel/timing.h b/common/kernel/timing.h
new file mode 100644
index 00000000..fe1bcaa8
--- /dev/null
+++ b/common/kernel/timing.h
@@ -0,0 +1,236 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 gatecat <gatecat@ds0.me>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef TIMING_H
+#define TIMING_H
+
+#include "nextpnr.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+struct CellPortKey
+{
+ CellPortKey(){};
+ CellPortKey(IdString cell, IdString port) : cell(cell), port(port){};
+ explicit CellPortKey(const PortRef &pr)
+ {
+ NPNR_ASSERT(pr.cell != nullptr);
+ cell = pr.cell->name;
+ port = pr.port;
+ }
+ IdString cell, port;
+ unsigned int hash() const { return mkhash(cell.hash(), port.hash()); }
+ inline bool operator==(const CellPortKey &other) const { return (cell == other.cell) && (port == other.port); }
+ inline bool operator!=(const CellPortKey &other) const { return (cell != other.cell) || (port != other.port); }
+ inline bool operator<(const CellPortKey &other) const
+ {
+ return cell == other.cell ? port < other.port : cell < other.cell;
+ }
+};
+
+struct ClockDomainKey
+{
+ IdString clock;
+ ClockEdge edge;
+ ClockDomainKey(IdString clock_net, ClockEdge edge) : clock(clock_net), edge(edge){};
+ // probably also need something here to deal with constraints
+ inline bool is_async() const { return clock == IdString(); }
+
+ unsigned int hash() const { return mkhash(clock.hash(), int(edge)); }
+
+ inline bool operator==(const ClockDomainKey &other) const { return (clock == other.clock) && (edge == other.edge); }
+};
+
+typedef int domain_id_t;
+
+struct ClockDomainPairKey
+{
+ domain_id_t launch, capture;
+ ClockDomainPairKey(domain_id_t launch, domain_id_t capture) : launch(launch), capture(capture){};
+ inline bool operator==(const ClockDomainPairKey &other) const
+ {
+ return (launch == other.launch) && (capture == other.capture);
+ }
+ unsigned int hash() const { return mkhash(launch, capture); }
+};
+
+struct TimingAnalyser
+{
+ public:
+ TimingAnalyser(Context *ctx) : ctx(ctx){};
+ void setup();
+ void run(bool update_route_delays = true);
+ void print_report();
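+
+ // Typical usage (sketch; variable names below are illustrative):
+ //   TimingAnalyser tmg(ctx);
+ //   tmg.setup();   // one-off initialisation
+ //   tmg.run();     // (re-)run STA, optionally refreshing route delays
+ //   float crit = tmg.get_criticality(CellPortKey(cell, port));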
+
+ // This is used when routers etc are not actually binding detailed routing (due to congestion or an abstracted
+ // model), but want to re-run STA with their own calculated delays
+ void set_route_delay(CellPortKey port, DelayPair value);
+
+ float get_criticality(CellPortKey port) const { return ports.at(port).worst_crit; }
+ float get_setup_slack(CellPortKey port) const { return ports.at(port).worst_setup_slack; }
+ float get_domain_setup_slack(CellPortKey port) const
+ {
+ delay_t slack = std::numeric_limits<delay_t>::max();
+ for (const auto &dp : ports.at(port).domain_pairs)
+ slack = std::min(slack, domain_pairs.at(dp.first).worst_setup_slack);
+ return slack;
+ }
+
+ bool setup_only = false;
+ bool verbose_mode = false;
+ bool have_loops = false;
+ bool updated_domains = false;
+
+ private:
+ void init_ports();
+ void get_cell_delays();
+ void get_route_delays();
+ void topo_sort();
+ void setup_port_domains();
+
+ void reset_times();
+
+ void walk_forward();
+ void walk_backward();
+
+ void compute_slack();
+ void compute_criticality();
+
+ void print_fmax();
+ // get the N most failing endpoints for a given domain pair
+ std::vector<CellPortKey> get_failing_eps(domain_id_t domain_pair, int count);
+ // print the critical path for an endpoint and domain pair
+ void print_critical_path(CellPortKey endpoint, domain_id_t domain_pair);
+
+ const DelayPair init_delay{std::numeric_limits<delay_t>::max(), std::numeric_limits<delay_t>::lowest()};
+
+ // Set arrival/required times if more/less than the current value
+ void set_arrival_time(CellPortKey target, domain_id_t domain, DelayPair arrival, int path_length,
+ CellPortKey prev = CellPortKey());
+ void set_required_time(CellPortKey target, domain_id_t domain, DelayPair required, int path_length,
+ CellPortKey prev = CellPortKey());
+
+ // To avoid storing the domain tag structure (which could get large when considering more complex constrained tag
+ // cases), assign each domain an ID and use that instead
+
+ // An arrival or required time entry. Stores both the min/max delays, and the traversal taken to reach them, for
+ // critical path reporting
+ struct ArrivReqTime
+ {
+ DelayPair value;
+ CellPortKey bwd_min, bwd_max;
+ int path_length;
+ };
+ // Data per port-domain tuple
+ struct PortDomainPairData
+ {
+ delay_t setup_slack = std::numeric_limits<delay_t>::max(), hold_slack = std::numeric_limits<delay_t>::max();
+ delay_t budget = std::numeric_limits<delay_t>::max();
+ int max_path_length = 0;
+ float criticality = 0;
+ };
+
+ // A cell timing arc, used to cache cell timings and reduce the number of potentially-expensive Arch API calls
+ struct CellArc
+ {
+
+ enum ArcType
+ {
+ COMBINATIONAL,
+ SETUP,
+ HOLD,
+ CLK_TO_Q
+ } type;
+
+ IdString other_port;
+ DelayQuad value;
+ // Clock polarity, not used for combinational arcs
+ ClockEdge edge;
+
+ CellArc(ArcType type, IdString other_port, DelayQuad value)
+ : type(type), other_port(other_port), value(value), edge(RISING_EDGE){};
+ CellArc(ArcType type, IdString other_port, DelayQuad value, ClockEdge edge)
+ : type(type), other_port(other_port), value(value), edge(edge){};
+ };
+
+ // Timing data for every cell port
+ struct PerPort
+ {
+ CellPortKey cell_port;
+ PortType type;
+ // per domain timings
+ dict<domain_id_t, ArrivReqTime> arrival;
+ dict<domain_id_t, ArrivReqTime> required;
+ dict<domain_id_t, PortDomainPairData> domain_pairs;
+ // cell timing arcs to (outputs)/from (inputs) from this port
+ std::vector<CellArc> cell_arcs;
+ // routing delay into this port (input ports only)
+ DelayPair route_delay{0};
+ // worst criticality and slack across domain pairs
+ float worst_crit = 0;
+ delay_t worst_setup_slack = std::numeric_limits<delay_t>::max(),
+ worst_hold_slack = std::numeric_limits<delay_t>::max();
+ };
+
+ struct PerDomain
+ {
+ PerDomain(ClockDomainKey key) : key(key){};
+ ClockDomainKey key;
+ // these are pairs (signal port; clock port)
+ std::vector<std::pair<CellPortKey, IdString>> startpoints, endpoints;
+ };
+
+ struct PerDomainPair
+ {
+ PerDomainPair(ClockDomainPairKey key) : key(key){};
+ ClockDomainPairKey key;
+ DelayPair period{0};
+ delay_t worst_setup_slack, worst_hold_slack;
+ };
+
+ CellInfo *cell_info(const CellPortKey &key);
+ PortInfo &port_info(const CellPortKey &key);
+
+ domain_id_t domain_id(IdString cell, IdString clock_port, ClockEdge edge);
+ domain_id_t domain_id(const NetInfo *net, ClockEdge edge);
+ domain_id_t domain_pair_id(domain_id_t launch, domain_id_t capture);
+
+ void copy_domains(const CellPortKey &from, const CellPortKey &to, bool backwards);
+
+ dict<CellPortKey, PerPort> ports;
+ dict<ClockDomainKey, domain_id_t> domain_to_id;
+ dict<ClockDomainPairKey, domain_id_t> pair_to_id;
+ std::vector<PerDomain> domains;
+ std::vector<PerDomainPair> domain_pairs;
+
+ std::vector<CellPortKey> topological_order;
+
+ Context *ctx;
+};
+
+// Evenly redistribute the total path slack amongst all sinks on each path
+void assign_budget(Context *ctx, bool quiet = false);
+
+// Perform timing analysis and print out the fmax, and optionally the
+// critical path
+void timing_analysis(Context *ctx, bool slack_histogram = true, bool print_fmax = true, bool print_path = false,
+ bool warn_on_failure = false, bool update_results = false);
+
+NEXTPNR_NAMESPACE_END
+
+#endif
diff --git a/common/kernel/util.h b/common/kernel/util.h
new file mode 100644
index 00000000..c10abb72
--- /dev/null
+++ b/common/kernel/util.h
@@ -0,0 +1,241 @@
+/*
+ * nextpnr -- Next Generation Place and Route
+ *
+ * Copyright (C) 2018 gatecat <gatecat@ds0.me>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef UTIL_H
+#define UTIL_H
+
+#include <map>
+#include <set>
+#include <string>
+#include "nextpnr.h"
+
+#include "log.h"
+
+NEXTPNR_NAMESPACE_BEGIN
+
+// Get a value from a map-style container, returning default if value is not
+// found
+template <typename Container, typename KeyType, typename ValueType>
+ValueType get_or_default(const Container &ct, const KeyType &key, ValueType def = ValueType())
+{
+ auto found = ct.find(key);
+ if (found == ct.end())
+ return def;
+ else
+ return found->second;
+};
+
+// Get a value from a map-style container, returning default if value is not
+// found (forces string)
+template <typename Container, typename KeyType>
+std::string str_or_default(const Container &ct, const KeyType &key, std::string def = "")
+{
+ auto found = ct.find(key);
+ if (found == ct.end())
+ return def;
+ else {
+ return found->second;
+ }
+};
+
+template <typename KeyType>
+std::string str_or_default(const dict<KeyType, Property> &ct, const KeyType &key, std::string def = "")
+{
+ auto found = ct.find(key);
+ if (found == ct.end())
+ return def;
+ else {
+ if (!found->second.is_string)
+ log_error("Expecting string value but got integer %d.\n", int(found->second.intval));
+ return found->second.as_string();
+ }
+};
+
+// Get a value from a map-style container, converting to int, and returning
+// default if value is not found
+template <typename Container, typename KeyType> int int_or_default(const Container &ct, const KeyType &key, int def = 0)
+{
+ auto found = ct.find(key);
+ if (found == ct.end())
+ return def;
+ else
+ return std::stoi(found->second);
+};
+
+template <typename KeyType> int int_or_default(const dict<KeyType, Property> &ct, const KeyType &key, int def = 0)
+{
+ auto found = ct.find(key);
+ if (found == ct.end())
+ return def;
+ else {
+ if (found->second.is_string) {
+ try {
+ return std::stoi(found->second.as_string());
+ } catch (std::invalid_argument &e) {
+ log_error("Expecting numeric value but got '%s'.\n", found->second.as_string().c_str());
+ }
+ } else
+ return found->second.as_int64();
+ }
+};
+
+// As above, but convert to bool
+template <typename Container, typename KeyType>
+bool bool_or_default(const Container &ct, const KeyType &key, bool def = false)
+{
+ return bool(int_or_default(ct, key, int(def)));
+};
+
+// Return a net if port exists, or nullptr
+inline const NetInfo *get_net_or_empty(const CellInfo *cell, const IdString port)
+{
+ auto found = cell->ports.find(port);
+ if (found != cell->ports.end())
+ return found->second.net;
+ else
+ return nullptr;
+}
+
+inline NetInfo *get_net_or_empty(CellInfo *cell, const IdString port)
+{
+ auto found = cell->ports.find(port);
+ if (found != cell->ports.end())
+ return found->second.net;
+ else
+ return nullptr;
+}
+
+// Get only value from a forward iterator begin/end pair.
+//
+// Generates assertion failure if std::distance(begin, end) != 1.
+template <typename ForwardIterator>
+inline const typename ForwardIterator::reference get_only_value(ForwardIterator begin, ForwardIterator end)
+{
+ NPNR_ASSERT(begin != end);
+ const typename ForwardIterator::reference ret = *begin;
+ ++begin;
+ NPNR_ASSERT(begin == end);
+ return ret;
+}
+
+// Get only value from a forward iterator range pair.
+//
+// Generates assertion failure if std::distance(r.begin(), r.end()) != 1.
+template <typename ForwardRange> inline auto get_only_value(ForwardRange r)
+{
+ auto b = r.begin();
+ auto e = r.end();
+ return get_only_value(b, e);
+}
+
+// From Yosys
+// https://github.com/YosysHQ/yosys/blob/0fb4224ebca86156a1296b9210116d9a9cbebeed/kernel/utils.h#L131
+template <typename T, typename C = std::less<T>> struct TopoSort
+{
+ bool analyze_loops, found_loops;
+ std::map<T, std::set<T, C>, C> database;
+ std::set<std::set<T, C>> loops;
+ std::vector<T> sorted;
+
+ TopoSort()
+ {
+ analyze_loops = true;
+ found_loops = false;
+ }
+
+ void node(T n)
+ {
+ if (database.count(n) == 0)
+ database[n] = std::set<T, C>();
+ }
+
+ void edge(T left, T right)
+ {
+ node(left);
+ database[right].insert(left);
+ }
+
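+ // Depth-first visit: a node is appended to 'sorted' only after all of its fanin ('left') nodes, giving a
+ // topological order; hitting a node already on the active path records the members of that loop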
+ void sort_worker(const T &n, std::set<T, C> &marked_cells, std::set<T, C> &active_cells,
+ std::vector<T> &active_stack)
+ {
+ if (active_cells.count(n)) {
+ found_loops = true;
+ if (analyze_loops) {
+ std::set<T, C> loop;
+ for (int i = int(active_stack.size()) - 1; i >= 0; i--) {
+ loop.insert(active_stack[i]);
+ if (active_stack[i] == n)
+ break;
+ }
+ loops.insert(loop);
+ }
+ return;
+ }
+
+ if (marked_cells.count(n))
+ return;
+
+ if (!database.at(n).empty()) {
+ if (analyze_loops)
+ active_stack.push_back(n);
+ active_cells.insert(n);
+
+ for (auto &left_n : database.at(n))
+ sort_worker(left_n, marked_cells, active_cells, active_stack);
+
+ if (analyze_loops)
+ active_stack.pop_back();
+ active_cells.erase(n);
+ }
+
+ marked_cells.insert(n);
+ sorted.push_back(n);
+ }
+
+ bool sort()
+ {
+ loops.clear();
+ sorted.clear();
+ found_loops = false;
+
+ std::set<T, C> marked_cells;
+ std::set<T, C> active_cells;
+ std::vector<T> active_stack;
+
+ for (auto &it : database)
+ sort_worker(it.first, marked_cells, active_cells, active_stack);
+
+ NPNR_ASSERT(sorted.size() == database.size());
+ return !found_loops;
+ }
+};
+
+template <typename T> struct reversed_range_t
+{
+ T &obj;
+ explicit reversed_range_t(T &obj) : obj(obj){};
+ auto begin() { return obj.rbegin(); }
+ auto end() { return obj.rend(); }
+};
+
+template <typename T> reversed_range_t<T> reversed_range(T &obj) { return reversed_range_t<T>(obj); }
+
+NEXTPNR_NAMESPACE_END
+
+#endif