Diffstat (limited to 'common/router1.cc')
-rw-r--r--  common/router1.cc  133
1 files changed, 120 insertions, 13 deletions
diff --git a/common/router1.cc b/common/router1.cc
index 0ff2bedd..f387aee1 100644
--- a/common/router1.cc
+++ b/common/router1.cc
@@ -117,14 +117,24 @@ struct Router1
int arcs_without_ripup = 0;
bool ripup_flag;
- Router1(Context *ctx, const Router1Cfg &cfg) : ctx(ctx), cfg(cfg) {}
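+ // Timing analyser providing per-sink criticality and setup slack for timing-driven routing.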
+ TimingAnalyser tmg;
+
+ bool timing_driven = true;
+
+ Router1(Context *ctx, const Router1Cfg &cfg) : ctx(ctx), cfg(cfg), tmg(ctx)
+ {
+ timing_driven = ctx->setting<bool>("timing_driven");
+ tmg.setup();
+ tmg.run();
+ }
void arc_queue_insert(const arc_key &arc, WireId src_wire, WireId dst_wire)
{
if (queued_arcs.count(arc))
return;
- delay_t pri = ctx->estimateDelay(src_wire, dst_wire) - arc.net_info->users[arc.user_idx].budget;
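+ // Prioritise arcs by estimated source-to-sink delay weighted by the sink's timing criticality,
+ // replacing the old slack-budget term.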
+ delay_t pri = ctx->estimateDelay(src_wire, dst_wire) *
+ (100 * tmg.get_criticality(CellPortKey(arc.net_info->users.at(arc.user_idx))));
arc_entry entry;
entry.arc = arc;
@@ -459,6 +469,8 @@ struct Router1
auto dst_wire = ctx->getNetinfoSinkWire(net_info, net_info->users[user_idx], arc.phys_idx);
ripup_flag = false;
+ float crit = tmg.get_criticality(CellPortKey(net_info->users.at(user_idx)));
+
if (ctx->debug) {
log("Routing arc %d on net %s (%d arcs total):\n", user_idx, ctx->nameOf(net_info),
int(net_info->users.size()));
@@ -536,6 +548,7 @@ struct Router1
delay_t next_delay = qw.delay + ctx->getPipDelay(pip).maxDelay();
delay_t next_penalty = qw.penalty;
delay_t next_bonus = qw.bonus;
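+ // Accumulate ripup penalties separately so they can be scaled by criticality before being added to next_penalty.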
+ delay_t penalty_delta = 0;
WireId next_wire = ctx->getPipDstWire(pip);
next_delay += ctx->getWireDelay(next_wire).maxDelay();
@@ -544,7 +557,7 @@ struct Router1
NetInfo *conflictWireNet = nullptr, *conflictPipNet = nullptr;
if (net_info->wires.count(next_wire) && net_info->wires.at(next_wire).pip == pip) {
- next_bonus += cfg.reuseBonus;
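+ // Timing-critical arcs get a smaller bonus for reusing this net's existing routing.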
+ next_bonus += cfg.reuseBonus * (1.0 - crit);
} else {
if (!ctx->checkWireAvail(next_wire)) {
if (!ripup)
@@ -609,34 +622,36 @@ struct Router1
if (conflictWireWire != WireId()) {
auto scores_it = wireScores.find(conflictWireWire);
if (scores_it != wireScores.end())
- next_penalty += scores_it->second * cfg.wireRipupPenalty;
- next_penalty += cfg.wireRipupPenalty;
+ penalty_delta += scores_it->second * cfg.wireRipupPenalty;
+ penalty_delta += cfg.wireRipupPenalty;
}
if (conflictPipWire != WireId()) {
auto scores_it = wireScores.find(conflictPipWire);
if (scores_it != wireScores.end())
- next_penalty += scores_it->second * cfg.wireRipupPenalty;
- next_penalty += cfg.wireRipupPenalty;
+ penalty_delta += scores_it->second * cfg.wireRipupPenalty;
+ penalty_delta += cfg.wireRipupPenalty;
}
if (conflictWireNet != nullptr) {
auto scores_it = netScores.find(conflictWireNet);
if (scores_it != netScores.end())
- next_penalty += scores_it->second * cfg.netRipupPenalty;
- next_penalty += cfg.netRipupPenalty;
- next_penalty += conflictWireNet->wires.size() * cfg.wireRipupPenalty;
+ penalty_delta += scores_it->second * cfg.netRipupPenalty;
+ penalty_delta += cfg.netRipupPenalty;
+ penalty_delta += conflictWireNet->wires.size() * cfg.wireRipupPenalty;
}
if (conflictPipNet != nullptr) {
auto scores_it = netScores.find(conflictPipNet);
if (scores_it != netScores.end())
- next_penalty += scores_it->second * cfg.netRipupPenalty;
- next_penalty += cfg.netRipupPenalty;
- next_penalty += conflictPipNet->wires.size() * cfg.wireRipupPenalty;
+ penalty_delta += scores_it->second * cfg.netRipupPenalty;
+ penalty_delta += cfg.netRipupPenalty;
+ penalty_delta += conflictPipNet->wires.size() * cfg.wireRipupPenalty;
}
}
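+ // In timing-driven mode, scale the ripup penalty by (1 - criticality), floored at 0.05,
+ // so critical arcs pay less to rip up conflicting routing.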
+ next_penalty += penalty_delta * (timing_driven ? std::max(0.05, (1.0 - crit)) : 1);
+
delay_t next_score = next_delay + next_penalty;
NPNR_ASSERT(next_score >= 0);
@@ -778,6 +793,53 @@ struct Router1
return true;
}
+
+ delay_t find_slack_thresh()
+ {
+ // If more than 5% of arcs have negative slack, use the 5th-percentile slack as the ripup criterion
+ int arc_count = 0;
+ int failed_count = 0;
+ delay_t default_thresh = ctx->getDelayEpsilon();
+
+ for (auto &net : ctx->nets) {
+ NetInfo *ni = net.second.get();
+ if (skip_net(ni))
+ continue;
+ for (size_t i = 0; i < ni->users.size(); i++) {
+ auto &usr = ni->users.at(i);
+ ++arc_count;
+ delay_t slack = tmg.get_setup_slack(CellPortKey(usr));
+ if (slack == std::numeric_limits<delay_t>::min())
+ continue;
+ if (slack < default_thresh)
+ ++failed_count;
+ }
+ }
+
+ if (arc_count < 50 || (failed_count < (0.05 * arc_count))) {
+ return default_thresh;
+ }
+
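+ // More than 5% of arcs fail: collect all valid slacks and return the 5th percentile as the threshold.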
+ std::vector<delay_t> slacks;
+ for (auto &net : ctx->nets) {
+ NetInfo *ni = net.second.get();
+ if (skip_net(ni))
+ continue;
+ for (size_t i = 0; i < ni->users.size(); i++) {
+ auto &usr = ni->users.at(i);
+ delay_t slack = tmg.get_setup_slack(CellPortKey(usr));
+ if (slack == std::numeric_limits<delay_t>::min())
+ continue;
+ slacks.push_back(slack);
+ }
+ }
+ std::sort(slacks.begin(), slacks.end());
+ delay_t thresh = slacks.at(int(slacks.size() * 0.05));
+ log_warning("%.f%% of arcs have failing slack; using %.2fns as ripup threshold. Consider a reduced Fmax "
+ "constraint.\n",
+ (100.0 * failed_count) / arc_count, ctx->getDelayNS(thresh));
+ return thresh;
+ }
};
} // namespace
@@ -819,6 +881,9 @@ bool router1(Context *ctx, const Router1Cfg &cfg)
int iter_cnt = 0;
int last_arcs_with_ripup = 0;
int last_arcs_without_ripup = 0;
+ int timing_fail_count = 0;
+ bool timing_ripup = ctx->setting<bool>("router/tmg_ripup", false);
+ delay_t ripup_slack = 0;
log_info(" | (re-)routed arcs | delta | remaining| time spent |\n");
log_info(" IterCnt | w/ripup wo/ripup | w/r wo/r | arcs| batch(sec) total(sec)|\n");
@@ -854,6 +919,48 @@ bool router1(Context *ctx, const Router1Cfg &cfg)
#endif
return false;
}
+ // Timing driven ripup
+ if (timing_ripup && router.arc_queue.empty() && timing_fail_count < 50) {
+ ++timing_fail_count;
+ router.tmg.run();
+ delay_t wns = 0, tns = 0;
+ if (timing_fail_count == 1)
+ ripup_slack = router.find_slack_thresh();
+ for (auto &net : ctx->nets) {
+ NetInfo *ni = net.second.get();
+ if (router.skip_net(ni))
+ continue;
+ bool is_locked = false;
+ for (auto &wire : ni->wires) {
+ if (wire.second.strength > STRENGTH_STRONG)
+ is_locked = true;
+ }
+ if (is_locked)
+ continue;
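+ // Rip up occupied sink wires of arcs whose setup slack is at or below the threshold;
+ // accumulate WNS/TNS for the summary log.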
+ for (size_t i = 0; i < ni->users.size(); i++) {
+ auto &usr = ni->users.at(i);
+ delay_t slack = router.tmg.get_setup_slack(CellPortKey(usr));
+ if (slack == std::numeric_limits<delay_t>::min())
+ continue;
+ if (slack < 0) {
+ wns = std::min(wns, slack);
+ tns += slack;
+ }
+ if (slack <= ripup_slack) {
+ for (WireId w : ctx->getNetinfoSinkWires(ni, usr)) {
+ if (ctx->checkWireAvail(w))
+ continue;
+ router.ripup_wire(w);
+ }
+ }
+ }
+ }
+ log_info(" %d arcs ripped up due to negative slack WNS=%.02fns TNS=%.02fns.\n",
+ int(router.arc_queue.size()), ctx->getDelayNS(wns), ctx->getDelayNS(tns));
+ iter_cnt = 0;
+ router.wireScores.clear();
+ router.netScores.clear();
+ }
}
auto rend = std::chrono::high_resolution_clock::now();
log_info("%10d | %8d %10d | %4d %5d | %9d| %10.02f %10.02f|\n", iter_cnt, router.arcs_with_ripup,