author     Baruch Sterin <baruchs@gmail.com>    2013-10-08 12:04:08 -0700
committer  Baruch Sterin <baruchs@gmail.com>    2013-10-08 12:04:08 -0700
commit     88f75c00ad6788afd78d2402358c45d14d4c1ede (patch)
tree       c5b04932e416e883c2ddd8f4059b60a5e3952f79 /scripts
parent     cbc718d701c26e7ef0e243a44ed420bacbe0056a (diff)
download   abc-88f75c00ad6788afd78d2402358c45d14d4c1ede.tar.gz
           abc-88f75c00ad6788afd78d2402358c45d14d4c1ede.tar.bz2
           abc-88f75c00ad6788afd78d2402358c45d14d4c1ede.zip
script changes for HWMCC13 (finally submitted version)
Diffstat (limited to 'scripts')
-rw-r--r--  scripts/abc.rc                                                |  288
-rw-r--r--  scripts/main.py                                               |  102
-rw-r--r--  scripts/multi_prove.sh (renamed from scripts/simple_prove.sh) |    2
-rw-r--r--  scripts/new_abc_commands.py                                   |  174
-rw-r--r--  scripts/niklas.py                                             |  182
-rw-r--r--  scripts/par.py                                                | 5445
-rw-r--r--  scripts/simple.sh (renamed from scripts/bmc2.sh)              |    3
-rw-r--r--  scripts/simple_liveness.sh (renamed from scripts/bmc3.sh)     |    3
-rw-r--r--  scripts/simple_sat.sh (renamed from scripts/dprove.sh)        |    3
9 files changed, 4598 insertions(+), 1604 deletions(-)
diff --git a/scripts/abc.rc b/scripts/abc.rc
index a39ed76a..365a49ca 100644
--- a/scripts/abc.rc
+++ b/scripts/abc.rc
@@ -1,10 +1,12 @@
+# reachx
python new_abc_commands.py
python -c "import reachx_cmd"
+
# global parameters
set check # checks intermediate networks
#set checkfio # prints warnings when fanins/fanouts are duplicated
-#set checkread # checks new networks after reading from file
+#unset checkread # does not check new networks after reading from file
#set backup # saves backup networks retrieved by "undo" and "recall"
#set savesteps 1 # sets the maximum number of backup networks to save
#set progressbar # display the progress bar
@@ -23,29 +25,39 @@ set capounix MetaPl-Capo10.1
set gnuplotwin wgnuplot.exe
set gnuplotunix gnuplot
-# standard aliases
-alias srw "b; rw; b"
-alias fix "ps; st; ps; srw; ps; plat; cycle -x -F 1; plat; logic; undc; st; zero; ps; scorr; ps"
-alias fixp "st; srw; cycle -x -F 1; logic; undc; st; zero "
+# Niklas Een's commands
+#load_plugin C:\_projects\abc\lib\bip_win.exe "BIP"
+#load_plugin /hd/common/pyabc/bip "BIP"
+# standard aliases
+alias hi history
alias b balance
+alias cg clockgate
alias cl cleanup
alias clp collapse
+alias cs care_set
alias esd ext_seq_dcs
alias f fraig
alias fs fraig_sweep
alias fsto fraig_store
alias fres fraig_restore
+alias fr fretime
alias ft fraig_trust
+alias ic indcut
alias lp lutpack
+alias pcon print_cone
alias pd print_dsd
alias pex print_exdc -d
alias pf print_factor
alias pfan print_fanio
+alias pg print_gates
alias pl print_level
+alias plat print_latch
alias pio print_io
alias pk print_kmap
+alias pm print_miter
alias ps print_stats
+alias psb print_stats -b
alias psu print_supp
alias psy print_symm
alias pun print_unate
@@ -53,16 +65,15 @@ alias q quit
alias r read
alias ra read_aiger
alias r3 retime -M 3
-alias r1 dretime
-alias r2 retime -M 2
-alias r4 retime -M 4
-alias r5 retime -M 5
-alias r6 retime -M 6
+alias r3f retime -M 3 -f
+alias r3b retime -M 3 -b
alias ren renode
alias rh read_hie
+alias ri read_init
alias rl read_blif
alias rb read_bench
alias ret retime
+alias dret dretime
alias rp read_pla
alias rt read_truth
alias rv read_verilog
@@ -70,8 +81,10 @@ alias rvl read_verlib
alias rsup read_super mcnc5_old.super
alias rlib read_library
alias rlibc read_library cadence.genlib
-alias rw drw
-alias rwz drw -z
+alias rty read_liberty
+alias rlut read_lut
+alias rw rewrite
+alias rwz rewrite -z
alias rf refactor
alias rfz refactor -z
alias re restructure
@@ -79,7 +92,6 @@ alias rez restructure -z
alias rs resub
alias rsz resub -z
alias sa set autoexec ps
-alias ua set autoexec
alias scl scleanup
alias sif if -s
alias so source -x
@@ -100,242 +112,36 @@ alias wp write_pla
alias wv write_verilog
# standard scripts
-alias share "b; multi; fx; b"
-alias resyn "b; drw; rwz; b; rwz; b"
-alias resyn2 "b; drw; rf; b; drw; rwz; b; rfz; rwz; b"
-alias resyn2a "b; drw; b; drw; rwz; b; rwz; b"
+alias resyn "b; rw; rwz; b; rwz; b"
+alias resyn2 "b; rw; rf; b; rw; rwz; b; rfz; rwz; b"
+alias resyn2a "b; rw; b; rw; rwz; b; rwz; b"
alias resyn3 "b; rs; rs -K 6; b; rsz; rsz -K 6; b; rsz -K 5; b"
-alias compress "b -l; drw -l; rwz -l; b -l; rwz -l; b -l"
-
-alias compress2 "b -l; drw -l; rf -l; b -l; drw -l; rwz -l; b -l; rfz -l; rwz -l; b -l"
+alias compress "b -l; rw -l; rwz -l; b -l; rwz -l; b -l"
+alias compress2 "b -l; rw -l; rf -l; b -l; rw -l; rwz -l; b -l; rfz -l; rwz -l; b -l"
alias choice "fraig_store; resyn; fraig_store; resyn2; fraig_store; fraig_restore"
alias choice2 "fraig_store; balance; fraig_store; resyn; fraig_store; resyn2; fraig_store; resyn2; fraig_store; fraig_restore"
-alias rwsat "st; drw -l; b -l; drw -l; rf -l"
-alias rwsat2 "st; drw -l; b -l; drw -l; rf -l; fraig; drw -l; b -l; drw -l; rf -l"
-alias shake "st; ps; sat -C 5000; drw -l; ps; sat -C 5000; b -l; rf -l; ps; sat -C 5000; rfz -l; ps; sat -C 5000; rwz -l; ps; sat -C 5000; rfz -l; ps; sat -C 5000"
-
-alias snap fraig_store
-alias unsnap fraig_restore
-alias sv "wl temp"
-alias usv "rl temp"
-alias pli print_latch
-alias cy "cycle -F 1"
-alias im imfs
-alias fx1 "fx -N 1"
-alias el4 "eliminate -N 4"
-alias if6 "if -K 6"
-alias fr fretime -g
-alias icb "ic -M 2 -B 10 -s"
-alias cs "care_set "
+alias rwsat "st; rw -l; b -l; rw -l; rf -l"
+alias drwsat2 "st; drw; b -l; drw; drf; ifraig -C 20; drw; b -l; drw; drf"
+alias share "st; multi -m; fx; resyn2"
+alias addinit "read_init; undc; strash; zero"
+alias blif2aig "undc; strash; zero"
+alias v2p "&vta_gla; &ps; &gla_derive; &put; w 1.aig; pdr -v"
+alias g2p "&ps; &gla_derive; &put; w 2.aig; pdr -v"
# resubstitution scripts for the IWLS paper
-alias src_rw "st; drw -l; rwz -l; rwz -l"
+alias src_rw "st; rw -l; rwz -l; rwz -l"
alias src_rs "st; rs -K 6 -N 2 -l; rs -K 9 -N 2 -l; rs -K 12 -N 2 -l"
-alias src_rws "st; drw -l; rs -K 6 -N 2 -l; rwz -l; rs -K 9 -N 2 -l; rwz -l; rs -K 12 -N 2 -l"
-alias resyn2rs "b; rs -K 6; drw; rs -K 6 -N 2; rf; rs -K 8; b; rs -K 8 -N 2; drw; rs -K 10; rwz; rs -K 10 -N 2; b; rs -K 12; rfz; rs -K 12 -N 2; rwz; b"
-alias compress2rs "b -l; rs -K 6 -l; drw -l; rs -K 6 -N 2 -l; rf -l; rs -K 8 -l; b -l; rs -K 8 -N 2 -l; drw -l; rs -K 10 -l; rwz -l; rs -K 10 -N 2 -l; b -l; rs -K 12 -l; rfz -l; rs -K 12 -N 2 -l; rwz -l; b -l"
-alias c2 "ua; compress2rs; sa"
-alias ic "indcut -v"
-alias lp "lutpack"
-alias c "ua; compress; sa"
-alias c1 "ua; compress;b -l; rs -K 6 -l; drw -l; rs -K 6 -N 2 -l; rf -l; rs -K 8 -l; b -l; sa"
-alias dr dretime
-alias ds dsec -v
-alias dp dprove -v
-
-
-# experimental implementation of don't-cares
-alias resyn2rsdc "b; rs -K 6 -F 2; drw; rs -K 6 -N 2 -F 2; rf; rs -K 8 -F 2; b; rs -K 8 -N 2 -F 2; drw; rs -K 10 -F 2; rwz; rs -K 10 -N 2 -F 2; b; rs -K 12 -F 2; rfz; rs -K 12 -N 2 -F 2; rwz; b"
-alias compress2rsdc "b -l; rs -K 6 -F 2 -l; drw -l; rs -K 6 -N 2 -F 2 -l; rf -l; rs -K 8 -F 2 -l; b -l; rs -K 8 -N 2 -F 2 -l; drw -l; rs -K 10 -F 2 -l; rwz -l; rs -K 10 -N 2 -F 2 -l; b -l; rs -K 12 -F 2 -l; rfz -l; rs -K 12 -N 2 -F 2 -l; rwz -l; b -l"
-
-# minimizing for FF literals
-alias fflitmin "compress2rs; ren; sop; ps -f"
-
-# temporaries
-#alias t "rvl th/lib.v; rvv th/t2.v"
-#alias t "so c/pure_sat/test.c"
-#alias t "r c/14/csat_998.bench; st; ps"
-#alias t0 "r res.blif; aig; mfs"
-#alias t "r res2.blif; aig; mfs"
-
-#alias tt "r a/quip_opt/nut_001_opt.blif"
-#alias ttb "wh a/quip_opt/nut_001_opt.blif 1.blif"
-#alias ttv "wh a/quip_opt/nut_001_opt.blif 1.v"
-
-#alias reach "st; ps; compress2; ps; qrel; ps; compress2; ps; qreach -v; ps"
-
-alias qs1 "qvar -I 96 -u; ps; qbf -P 96"
-alias qs2 "qvar -I 96 -u; qvar -I 97 -u; ps; qbf -P 96"
-alias qs3 "qvar -I 96 -u; qvar -I 97 -u; qvar -I 98 -u; ps; qbf -P 96"
-alias qs4 "qvar -I 96 -u; qvar -I 97 -u; qvar -I 98 -u; qvar -I 99 -u; ps; qbf -P 96"
-alias qs5 "qvar -I 96 -u; qvar -I 97 -u; qvar -I 98 -u; qvar -I 99 -u; qvar -I 100 -u; ps; qbf -P 96"
-alias qs6 "qvar -I 96 -u; qvar -I 97 -u; qvar -I 98 -u; qvar -I 99 -u; qvar -I 100 -u; qvar -I 101 -u; ps; qbf -P 96"
-alias qs7 "qvar -I 96 -u; qvar -I 97 -u; qvar -I 98 -u; qvar -I 99 -u; qvar -I 100 -u; qvar -I 101 -u; qvar -I 102 -u; ps; qbf -P 96"
-alias qs8 "qvar -I 96 -u; qvar -I 97 -u; qvar -I 98 -u; qvar -I 99 -u; qvar -I 100 -u; qvar -I 101 -u; qvar -I 102 -u; qvar -I 103 -u; ps; qbf -P 96"
-alias qs9 "qvar -I 96 -u; qvar -I 97 -u; qvar -I 98 -u; qvar -I 99 -u; qvar -I 100 -u; qvar -I 101 -u; qvar -I 102 -u; qvar -I 103 -u; qvar -I 104 -u; ps; qbf -P 96"
-alias qsA "qvar -I 96 -u; qvar -I 97 -u; qvar -I 98 -u; qvar -I 99 -u; qvar -I 100 -u; qvar -I 101 -u; qvar -I 102 -u; qvar -I 103 -u; qvar -I 104 -u; qvar -I 105 -u; ps; qbf -P 96"
-
-alias chnew "st; haig_start; resyn2; haig_use"
-alias chnewrs "st; haig_start; resyn2rs; haig_use"
-
-alias stdsd "r test/6in.blif; st; ps; u; bdd; dsd -g; st; ps"
-alias trec "rec_start; r c.blif; st; rec_add; rec_use"
-alias trec4 "rec_start -K 4; r i10.blif; st; rec_add; rec_use"
-alias trec5 "rec_start -K 5; r i10.blif; st; rec_add; rec_use"
-alias trec6 "rec_start -K 6; r i10.blif; st; rec_add; rec_use"
-alias trec7 "rec_start -K 7; r i10.blif; st; rec_add; rec_use"
-alias trec8 "rec_start -K 8; r i10.blif; st; rec_add; rec_use"
-alias trec10 "rec_start -K 10; r i10.blif; st; rec_add; rec_use"
-alias trec12 "rec_start -K 12; r i10.blif; st; rec_add; rec_use"
-
-#alias tsh "r i10_if.blif; st; ps; u; sw; st; ps; cec"
-alias tst4 "r i10_if4.blif; st; ps; r x/rec4_.blif; st; rec_start; r i10_if4.blif; st -r; ps; cec"
-alias tst4n "r i10_if4.blif; st; ps; r 5npn/all_functions.aig; st; rec_start; r i10_if4.blif; st -r; ps; cec"
-alias tst6 "r i10_if6.blif; st; ps; r x/rec6_16_.blif; st; rec_start; r i10_if6.blif; st -r; ps; cec"
-
-alias sn short_names
-
-alias inth "int -rv -C 25000 -N 10000"
-alias inthh "int -rv -C 75000 -N 100"
-alias a "alias "
-
-alias indh "ind -v -F 50 -C 10000"
-alias indhu "ind -vu -F 25 -C 10000"
-#alias reachx "reach -v -B 2000000 -F 20000"
-alias dc2rs "ua; compress2rs; ps"
-
-alias ffx "ps;orpos;qua_ffix"
-alias bfx "ps;orpos;qua_bfix"
-alias era "&get;&era -mv;&put"
-
-#simulations
-alias simh "sim -m -F 500 -W 15"
-alias simhh "sim -m -F 2500 -W 3"
-alias simdeep "sim -m -F 50000 -W 1"
-alias simwide "sim -m -F 500 -W 255"
-
-#BMC's:
-alias bmc2h "bmc2 -v -C 25000 -G 250000 -F 100"
-alias bmc2hh "bmc2 -v -C 75000 -G 750000 -F 100"
-
-
-#SIMPLIFICATIONS
-alias scr "&get; &scorr; &put"
-alias lcr "&get; &lcorr; &put"
-
-alias trm "logic;trim;st;ps"
-
-alias smp "ua;ps;scl;ps;drw;dr;lcorr;drw;dr;ps;scorr;ps;fraig;ps;dc2;dr;scorr -F 2;ps;dc2rs;w temp.aig"
-alias smp1 "scl;drw;dr;lcorr;drw;dr;scorr;fraig;dc2rs"
-alias smp2 "ua;ps;scl;ps;drw;dr;lcorr;drw;dr;ps;scorr;ps;fraig;ps;dc2;dr;ps;dc2rs;w temp.aig"
-alias smpf "ua;ps;scl;lcr;ps;drw;dr;ps;scr;ps;dc2;&get;&scorr -F 2;&put;dr;ps;dc2;ps;w temp.aig"
-
-
-alias &smp "ua;&get;&ps;&scl;&ps;&dc2;&put;dr;&get;&lcorr;&dc2;&put;dr;&get;&ps;&scorr;&ps;&fraig;&ps;&dc2;&put;dr;&get;&scorr -F 2;&ps;&dc2;&put;w temp.aig"
-
-alias smplite '&get;&scl;&dc2;&put;dr;&get;&lcorr;&dc2;&put;dr;&get;&scorr;&dc2;&put;dr;&get;&dc2;&put;ps;w temp.aig'
-
-alias &smp1 "ua;&get;&ps;&scl;&ps;&dc2;&put;dr;&get;&lcorr;&dc2;&put;dr;&get;&ps;&scorr;&ps;&fraig;&ps;&dc2;&put;dr;&get;&ps;&dc2;&put;w temp.aig"
-
-alias &smpf "ua;ps;drw;&get;&ps;&scl;&ps;&put;dr;&get;&ps;&lcorr;&ps;&dc2;&ps;&scorr;&ps;&put;drw;ps;w temp.aig"
-
-#for each output separately
-alias simpk "dprove -vrcbkmiu -B 10 -D 1000"
-alias simpkh "simpk -D 5000"
-alias simpkf "simpk -D 10"
-
-
-#ABSTRACTIONS
-
-#reparameterization
-alias rpm "ps;&get;&reparam;&put;ps"
-
-#register abstraction
-alias absh "abs -se -D 200000 -R 2; short_names"
-alias abshx "abs -se -D 1000000; short_names"
-alias absr "abs -ser -G 2000; short_names"
-alias absp "abs -sep -G 2000; short_names"
-alias absh1 "abs -se -D 200000 -R 1; short_names"
-
-#ABSTRACTION allowing continuation of register abstraction
-alias absgo "&get; &abs_start -C 10000 -R 2; &ps; &w 1.aig; &abs_derive;&put; w gabs.aig"
-alias absgof "&get; &abs_start -C 1000 -R 2; &ps; &w 1.aig; &abs_derive;&put; w gabs.aig"
-alias absgoh "&get; &abs_start -C 200000 -R 2; &ps; &w 1.aig; &abs_derive;&put;w gabs.aig"
-
-#continuation after a cex is found
-alias absc "&r 1.aig; &ps; &abs_refine; &w 1.aig; &ps; &abs_derive; &ps;&put; w gabs.aig"
-
-#PBA - proof based abstraction. continuation with cex is done with absc.
-alias pbago "&get; &pba_start -vd -C 25000 -F 10; &ps; &w 1.aig; &abs_derive; &put; w gabs.aig"
-
-#SPECULATION
-#initial speculation where equivalences are gathered.
-
-alias spechisysf "ua; &get; &equiv -s -W 512 -F 2000; &semi -v -F 50; &ps; &speci -F 1000 -C 25000; &srm -s; r gsrm.aig; &ps; &w gore.aig"
-
-alias spechisysfx "ua; &get; &equiv -s -W 512 -F 2000; &semi -v -F 50; &ps; &speci -F 10000 -C 200000; &srm -s; r gsrm.aig; &ps; &w gore.aig"
-
-alias spechisysff "ua; &get; &equiv -s -W 512 -F 2000; &semi -v -F 50; &ps; &speci -F 10000 -C 5000; &srm -s; r gsrm.aig; &ps; &w gore.aig"
-
-alias spechisysfq "ua; &get; &equiv -s -W 512 -F 2000; &semi -v -F 50; &ps; &speci -F 10000 -C 10; &srm -s; r gsrm.aig; &ps; &w gore.aig"
-
-
-# CONTINUATION OF SPECULATION
-
-#BMC based:
-alias spec "&r gore.aig;&srm -s;r gsrm.aig; bmc2 -v -F 100 -C 10000 -G 100000; &resim -m; &w gore.aig; &ps "
-
-alias spech "&r gore.aig;&srm -s;r gsrm.aig;smp;ps; bmc2 -v -F 100 -C 25000 -G 250000; &resim -m; &w gore.aig; &ps "
-
-alias spechh "&r gore.aig;&srm ;r gsrm.aig;smp;simpkf;smp;bmc2 -v -F 100 -C 200000; &resim -m; &w gore.aig; &ps "
-
-alias specheavy "&r gore.aig;&srm -s;r gsrm.aig; smp;simpk;smp;bmc2 -v -F 5000 -C 200000 -F 100; &resim -m; &w gore.aig; &ps"
-
-
-#BDD based:
-alias specb "&r gore.aig;&srm ;r gsrm.aig;smp;ps; reach -ov -B 1000000 -F 200000; &resim -m; &w gore.aig; &ps "
-
-alias specbb "&r gore.aig;&srm ;r gsrm.aig;smp;simpk -D 100;smp;ps; reach -ov -B 1000000 -F 200000; &resim -m; &w gore.aig; &ps "
-
-
-#Interpolation based:
-alias specint "&r gore.aig;&srm ;r gsrm.aig;inth;&resim -m; &w gore.aig; &ps"
-
-alias speck "&r gore.aig;&srm ;r gsrm.aig;simpk;&resim -m; &w gore.aig; &ps "
-
-alias speckf "&r gore.aig;&srm ;r gsrm.aig;simpk -D 100;&resim -m; &w gore.aig; &ps "
-
-alias specpk "&r gore.aig;&srm ;r gsrm.aig;simpkf;smp;ps; simpk ; &resim -m; &w gore.aig; &ps "
-
-alias specpkh "&r gore.aig;&srm ;r gsrm.aig;simpkf;smp;ps; simpkh ; &resim -m; &w gore.aig; &ps "
-
-alias specp "&r gore.aig;&srm ;r gsrm.aig;ps; dprove -rmficbu -B 10 -D 10; &resim -m; &w gore.aig; &ps "
-
-alias spece "&r gore.aig; &srm ;r gsrm.aig; smp; ps; &w gore.aig; &get; &era -m; &r gore.aig; &resim -m; &w gore.aig; &ps"
-
-#simulation based:
-alias specs "&r gore.aig; &srm ; r gsrm.aig; sim -m -F 500 -W 15; &resim -m; &w gore.aig; &ps "
-
-alias specsh "&r gore.aig; &srm ; r gsrm.aig; sim -m -F 3500 -W 3; &resim -m; &w gore.aig;&ps "
-
-alias speci "&r gore.aig;&srm ;r gsrm.aig;int -tk -C 2000;&resim -m; &w gore.aig; &ps"
-
-
-
-alias %sa "set autoexec %ps"
-alias %scr "%get;%st;%scorr;%put;st"
-
-alias sc "fold;w tempc.aig;unfold -s"
-alias uc "r tempc.aig;unfold -s"
-alias smpc "scl;drw;ps;scorr -c;ps;fraig;ps;compress2rs;ps"
-
-alias abseen "&get;,abs -vt=60 -timeout=60 -bob=10 -aig=temp.aig;r temp.aig"
-alias pdreen "&get;,pdr -vt=60"
-alias inteen "&get;,imc -vt=60"
-alias bmceen "&get;,bmc -vt=60"
-
+alias src_rws "st; rw -l; rs -K 6 -N 2 -l; rwz -l; rs -K 9 -N 2 -l; rwz -l; rs -K 12 -N 2 -l"
+alias resyn2rs "b; rs -K 6; rw; rs -K 6 -N 2; rf; rs -K 8; b; rs -K 8 -N 2; rw; rs -K 10; rwz; rs -K 10 -N 2; b; rs -K 12; rfz; rs -K 12 -N 2; rwz; b"
+alias compress2rs "b -l; rs -K 6 -l; rw -l; rs -K 6 -N 2 -l; rf -l; rs -K 8 -l; b -l; rs -K 8 -N 2 -l; rw -l; rs -K 10 -l; rwz -l; rs -K 10 -N 2 -l; b -l; rs -K 12 -l; rfz -l; rs -K 12 -N 2 -l; rwz -l; b -l"
+# use this script to convert 1-valued and DC-valued flops for an AIG
+alias fix_aig "logic; undc; strash; zero"
+# use this script to convert 1-valued and DC-valued flops for a logic network coming from BLIF
+alias fix_blif "undc; strash; zero"
+# lazy man's synthesis
+alias recadd3 "st; rec_add3; b; rec_add3; dc2; rec_add3; if -K 8; bidec; st; rec_add3; dc2; rec_add3; if -g -K 6; st; rec_add3"
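Editor's note: the aliases above are ordinary ABC commands once abc.rc has been sourced, so they can also be driven from pyabc (the same binding the Python scripts in this patch use). A minimal sketch, assuming an interactive pyabc session and a placeholder design file "design.aig":

    import pyabc

    # make the aliases defined in abc.rc (fix_aig, resyn2rs, ...) available
    pyabc.run_command('source scripts/abc.rc')

    # read a design, normalize 1-valued/DC-valued flops, then resynthesize
    pyabc.run_command('read design.aig')   # "design.aig" is a placeholder
    pyabc.run_command('fix_aig')           # expands to: logic; undc; strash; zero
    pyabc.run_command('resyn2rs')          # the balance/rewrite/resub script above
    pyabc.run_command('print_stats')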
diff --git a/scripts/main.py b/scripts/main.py
index d21ced80..8f8a8721 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -1,4 +1,6 @@
import os
+import time
+from datetime import *
##import par
##from abc_common import *
import abc_common
@@ -17,26 +19,6 @@ from pyabc_split import *
#x('source ../../abc.rc')
#abc_common.x('source ../../abc.rc')
-
-#IBM directories
-# directories = ['ibmhard']
-ibmhard = (33,34,36,37,28,40,42,44,48,5,49,50,52,53,58,59,6,64,66,67,68,\
- 69,70,71,72,73,74,75,76,78,79,80,81,82,83,84,86,9,87,89,90,0,10,\
- 11,12,14,15,16,2,19,20,29,31,32)
-
-
-#directories = ['IBM_converted']
-#ibm_convert = (3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,\
-# 27,28,29,30,31,32,33,34,35,36,37,38,39,40,41)
-#IBM_converted = range(42)[3:]
-
-#the smaller files in ascending order
-#IBM_converted = (26,38,21,15,22,23,20,27,16,19,24,28,18,9,8,7,6,25,17,3,4)
-
-#the larger file in order (skipping 39)
-#IBM_converted = (39,5,40,41,13,12,10,14,11,31,33,36,29,34,35,37,30)
-IBM_converted = (5,40,41,13,12,10,14,11,31,33,36,29,34,35,37,30)
-
def scorriter():
""" apply scorr with increasing conflicts up to 100"""
x('time')
@@ -159,6 +141,48 @@ def list_aig(s=''):
result = result + [name]
return result
+def list_original():
+ list_dir = os.listdir('.')
+ list_dir.sort()
+ out = []
+ for i in range(len(list_dir)):
+ name = '%s/original.aig'%list_dir[i]
+ if os.access('%s'%name,os.R_OK):
+ abc('r %s;&ps'%name)
+ if n_latches() > 0 and n_pos() > 1:
+ size = str(sizeof())
+ print list_dir[i],
+ ps()
+ out = out + ['%s: %s'%(list_dir[i],size)]
+ return out
+
+def list_size(s=''):
+ """ prnts out the sizes of aig files. Leaves .aig as part of name"""
+ #os.chdir('../ibm-web')
+ list_all = os.listdir('.')
+ dir = lst(list_all,'.aig')
+ dir.sort()
+ result = []
+ for j in range(len(dir)):
+## name = dir[j][:-4]
+ name = dir[j] #leaves .aig as part of name
+ if not s_in_s(s,name):
+ continue
+ print '%s '%name,
+## abc('r %s.aig'%name)
+ abc('r %s'%name)
+ ps()
+ result = result + [name[:-4]] #takes .aig off of name
+ return result
+
+def rename(l=[]):
+ for j in range(len(l)):
+ name = l[j]
+ name1 = name +'.aig'
+ name2 = name[:-4]+'simp.aig' #take off _smp and put on simp
+ os.rename(name1,name2)
+
+
def list_blif(s=''):
""" prnts out the sizes of aig files"""
#os.chdir('../ibm-web')
@@ -222,11 +246,15 @@ def cleanup():
(s_in_s('_bip', name)) or (s_in_s('sm0', name)) or (s_in_s('gabs', name))
or (s_in_s('temp', name)) or (s_in_s('__', name)) or (s_in_s('greg', name)) or (s_in_s('tf2', name))
or (s_in_s('gsrm', name)) or (s_in_s('_rpm', name )) or (s_in_s('gsyn', name)) or (s_in_s('beforerpm', name))
- or (s_in_s('afterrpm', name)) or (s_in_s('initabs', name)) or (s_in_s('.status', name)) or (s_in_s('_init', name))
+ or (s_in_s('afterrpm', name)) or (s_in_s('initabs', name)) or (s_in_s('_init', name))
or (s_in_s('_osave', name)) or (s_in_s('tt_', name)) or (s_in_s('_before', name)) or (s_in_s('_after', name))
- or (s_in_s('_and', name)) or (s_in_s('_final', name)) or (s_in_s('_spec', name)) or (s_in_s('temp.a', name))
+ or (s_in_s('_and', name)) or (s_in_s('_spec', name)) or (s_in_s('temp.a', name))
or (s_in_s('_sync', name)) or (s_in_s('_old', name)) or (s_in_s('_cone_', name)) or (s_in_s('_abs', name))
- or (s_in_s('_vabs', name))
+ or (s_in_s('_vabs', name)) or (s_in_s('_gla', name)) or (s_in_s('vabs', name)) or (s_in_s('_mp2', name))
+ or (s_in_s('_sc1', name)) or (s_in_s('_sc2', name)) or (s_in_s('_after', name))
+ or (s_in_s('_before', name)) or (s_in_s('_aigs_', name)) or (s_in_s('_cex.', name))
+ or (s_in_s('_bmc1', name)) or (s_in_s('_p0_', name)) or (s_in_s('_p1_', name))
+ or (s_in_s('_unsolv', name)) or (s_in_s('_iso1', name))
):
os.remove(name)
@@ -296,7 +324,33 @@ def absn(a,c,s):
write_file('absn')
return "Done"
-
+def time_stamp():
+ d=datetime.today()
+ s = d.strftime('%m.%d.%y-%X')
+ return s
+
+def apply_sp(list):
+ global m_trace
+ s = time_stamp()
+ out_name = "%s-traces.txt"%s
+ print out_name
+ if os.access(out_name,os.R_OK):
+ os.remove(out_name)
+ f = open(out_name,'w')
+ print f
+ for j in range(len(list)):
+ name = list[j]
+ print '\n\n**** %s ****\n'%name
+ f.write('\n\n****%s ****'%name)
+ read_file_quiet(name)
+ result = super_prove()
+ trace = result[1]
+ s = str(trace)
+ f.write('\n\n')
+ f.write(s)
+ f.flush()
+ f.close()
+
def xfiles():
global f_name
#output = sys.stdout
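Editor's note: a short usage sketch for the new apply_sp helper above (illustrative, not part of the patch). It takes a list of benchmark names, runs super_prove on each, and appends the per-design engine trace to a file named from time_stamp(), e.g. "10.08.13-12:04:08-traces.txt" under a typical locale. Assuming scripts/ is on the Python path and that list_aig returns names read_file_quiet accepts (it tolerates a missing .aig extension):

    # run inside the pyabc interpreter
    import main

    designs = main.list_aig()   # the .aig benchmarks in the current directory
    main.apply_sp(designs)      # traces are collected in <timestamp>-traces.txt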
diff --git a/scripts/simple_prove.sh b/scripts/multi_prove.sh
index 1fc8773b..08a36c8f 100644
--- a/scripts/simple_prove.sh
+++ b/scripts/multi_prove.sh
@@ -11,4 +11,4 @@ abc_root()
abc_dir=$(abc_root "$0")
bin_dir="${abc_dir}"/bin
-exec ${bin_dir}/abc -c "/simple_prove_aiger $*"
+exec ${bin_dir}/abc -c "/multi_prove_aiger $*"
diff --git a/scripts/new_abc_commands.py b/scripts/new_abc_commands.py
index 93956f9c..bf6730bf 100644
--- a/scripts/new_abc_commands.py
+++ b/scripts/new_abc_commands.py
@@ -7,6 +7,8 @@ import shutil
import redirect
import optparse
+from contextlib import contextmanager
+
def read_cmd(args):
if len(args)==2:
par.read_file_quiet(args[1])
@@ -90,7 +92,50 @@ def print_aiger_result(args):
pyabc.add_abc_command(print_aiger_result, "ZPython", "/print_aiger_result", 0)
-def proof_command_wrapper(prooffunc, category_name, command_name, change):
+@contextmanager
+def replace_report_result(multi):
+
+ def report_result(po, result):
+
+ print "REPORT RESULT: ", po, result
+
+ print >> stdout, "%d"%result
+ print >> stdout, "b%d"%po
+ print >> stdout, "."
+
+ def report_liveness_result(po, result):
+
+ print "REPORT RESULT: ", po, result
+
+ print >> stdout, "%d"%result
+ print >> stdout, "j%d"%po
+ print >> stdout, "."
+
+ def report_bmc_depth(depth):
+
+ if not multi:
+ print "REPORT BMC DEPTH:", depth
+ print >> stdout, "u%d"%depth
+
+ with redirect.save_stdout() as stdout:
+
+ old_report_result = par.report_result
+ par.report_result = report_result
+
+ #old_report_liveness_result = par.report_liveness_result
+ par.report_liveness_result = report_liveness_result
+
+ old_report_bmc_depth = par.report_bmc_depth
+ par.report_bmc_depth = report_bmc_depth
+
+ try:
+ yield
+ finally:
+ par.report_result = old_report_result
+ #~ par.report_liveness_result = report_liveness_result
+ par.report_bmc_depth = old_report_bmc_depth
+
+def proof_command_wrapper_internal(prooffunc, category_name, command_name, change, multi=False):
def wrapper(argv):
@@ -104,53 +149,106 @@ def proof_command_wrapper(prooffunc, category_name, command_name, change):
options, args = parser.parse_args(argv)
if len(args) != 2:
- print args
parser.print_usage()
return 0
aig_filename = os.path.abspath(args[1])
- if not options.noisy:
- pyabc.run_command('/pushredirect')
-
- if not options.current_dir:
- pyabc.run_command('/pushdtemp')
-
- try:
- for d in os.environ['PATH'].split(':'):
- bip = os.path.join(d, 'bip')
- if os.path.exists(bip):
- pyabc.run_command("load_plugin %s Bip"%bip)
- break
-
- basename = os.path.basename( aig_filename )
- shutil.copyfile(aig_filename, basename)
- aig_filename = basename
-
- par.read_file_quiet(aig_filename)
- result = prooffunc()
-
- par.cex_list = []
- except:
- result = None
+ with replace_report_result(multi):
- if not options.current_dir:
- pyabc.run_command('/popdtemp')
+ if not options.noisy:
+ pyabc.run_command('/pushredirect')
+
+ if not options.current_dir:
+ pyabc.run_command('/pushdtemp')
+
+ try:
+ for d in os.environ['PATH'].split(':'):
+ bip = os.path.join(d, 'bip')
+ if os.path.exists(bip):
+ pyabc.run_command("load_plugin %s Bip"%bip)
+ break
+
+ basename = os.path.basename( aig_filename )
+ shutil.copyfile(aig_filename, basename)
+ aig_filename = basename
+
+ result = prooffunc(aig_filename)
+
+ par.cex_list = []
+ except:
+ result = None
- if not options.noisy:
- pyabc.run_command('/popredirect')
+ if not multi:
- if result=="SAT":
- print 1
- elif result=="UNSAT":
- print 0
- else:
- print 2
+ if result=="SAT":
+ par.report_result(0,1)
+ elif result=="UNSAT":
+ par.report_result(0,0)
+ elif type(result)==list and len(result)>0 and result[0] == "SAT":
+ par.report_result(0,1)
+ elif type(result)==list and len(result)>0 and result[0] == "UNSAT":
+ par.report_result(0,0)
+ else:
+ par.report_result(0,2)
+
+ if not options.current_dir:
+ pyabc.run_command('/popdtemp')
+
+ if not options.noisy:
+ pyabc.run_command('/popredirect')
return 0
pyabc.add_abc_command(wrapper, category_name, command_name, change)
-proof_command_wrapper(par.super_prove, 'HWMCC11', '/super_prove_aiger', 0)
-proof_command_wrapper(par.simple_prove, 'HWMCC11', '/simple_prove_aiger', 0)
-proof_command_wrapper(par.simple_bip, 'HWMCC11', '/simple_bip_aiger', 0)
+def proof_command_wrapper(prooffunc, category_name, command_name, change, multi=False):
+ def pf(aig_filename):
+ par.read_file_quiet(aig_filename)
+ return prooffunc()
+ return proof_command_wrapper_internal(pf, category_name, command_name, change, multi)
+
+proof_command_wrapper(par.sp, 'HWMCC13', '/super_prove_aiger', 0)
+proof_command_wrapper(par.simple, 'HWMCC13', '/simple_aiger', 0)
+proof_command_wrapper(par.simple_bip, 'HWMCC13', '/simple_bip_aiger', 0)
+proof_command_wrapper(par.simple_sat, 'HWMCC13', '/simple_sat_aiger', 0)
+proof_command_wrapper(par.mp, 'HWMCC13', '/multi_prove_aiger', 0, multi=True)
+
+from niklas import run_niklas_multi
+
+def simple_liveness_prooffunc(aig_filename):
+
+ def simplify(aiger_in, aiger_out):
+
+ saved = utils.save_po_info(aiger_in, tmp[0])
+
+ par.read_file_quiet(tmp[0])
+ par.pre_simp()
+ pyabc.run_command( 'write_aiger %s'%tmp[1] )
+
+ #~ pyabc.run_command( 'read_aiger %s'%tmp[0] )
+ #~ pyabc.run_command( 'dc2 ; dc2 ; dc2 ; dc2' )
+ #~ pyabc.run_command( 'write_aiger %s'%tmp[1] )
+
+ utils.restore_po_info( saved, tmp[1], aiger_out )
+
+ def report_result(id, res):
+
+ if res and 'result' in res:
+ result = res['result']
+ if result=='proved':
+ par.report_liveness_result(id, 0)
+ return True
+ elif result=='failed':
+ par.report_liveness_result(id, 1)
+ return True
+
+ return False
+
+ try:
+ run_niklas_multi(aig_filename, simplify=simplify, report_result=report_result)
+ except:
+ import traceback
+ traceback.print_exc()
+
+proof_command_wrapper_internal( simple_liveness_prooffunc, "HWMCC13", "/simple_liveness_aiger", 0, multi=True)
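Editor's note: proof_command_wrapper turns any zero-argument prover from par into an ABC command that reads the AIG, runs the prover, and reports the verdict through par.report_result; replace_report_result redirects that into the competition-style output lines ('0'/'1'/'2', then 'b<po>' or 'j<po>', then '.', plus 'u<depth>' depth lines when the command is not registered with multi=True). A hypothetical registration following the same pattern ("my_prover" and "/my_prover_aiger" are made-up names):

    # sketch only: any function returning 'SAT', 'UNSAT', or anything else
    # (reported as "unknown") can be registered this way
    def my_prover():
        par.pre_simp()       # simplify first, as simple_liveness_prooffunc does
        return 'UNDECIDED'

    proof_command_wrapper(my_prover, 'HWMCC13', '/my_prover_aiger', 0)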
diff --git a/scripts/niklas.py b/scripts/niklas.py
new file mode 100644
index 00000000..cfab8acc
--- /dev/null
+++ b/scripts/niklas.py
@@ -0,0 +1,182 @@
+import os
+import sys
+import time
+import tempfile
+import subprocess
+
+from contextlib import contextmanager
+
+import pyabc
+import pyabc_split
+
+@contextmanager
+def temp_file_names(N, suffix=""):
+
+ files = []
+
+ try:
+
+ for i in xrange(N):
+ files.append( tempfile.NamedTemporaryFile(suffix=suffix) )
+
+ yield [ f.name for f in files ]
+
+ finally:
+ for f in files:
+ f.close()
+
+def parse_bip_status(status):
+
+ res = {}
+
+ for line in open(status, 'r'):
+
+ line = line.strip()
+
+ colon = line.find(':')
+
+ if colon < 0:
+ continue
+
+ field = line[:colon]
+ data = line[colon+2:]
+
+ res[field] = data
+
+ return res
+
+def run_bip(args, aiger):
+
+ with temp_file_names(1) as tmpnames:
+
+ args = [
+ 'bip',
+ '-abc',
+ '-input=%s'%aiger,
+ '-output=%s'%tmpnames[0],
+ ] + args;
+
+ rc = subprocess.call(args, preexec_fn=pyabc._set_death_signal)
+
+ if rc!=0:
+ return None
+
+ return parse_bip_status(tmpnames[0])
+
+from pyaig import AIG, read_aiger, write_aiger, utils
+
+def run_niklas_single(aiger, simplify, report_result, timeout=None):
+
+ orig_args = [
+ [ ',live', '-k=l2s', '-eng=treb-abs' ],
+ [ ',live', '-k=inc' ],
+ [ ',live', '-k=l2s', '-eng=bmc' ],
+ ]
+
+ simplified_args = [
+ [ ',live', '-k=inc' ],
+ [ ',live', '-k=l2s', '-eng=bmc' ],
+ [ ',live', '-k=l2s', '-eng=treb' ],
+ ]
+
+ with temp_file_names(1, suffix='.aig') as simple_aiger:
+
+ orig_funcs = [ pyabc_split.defer(run_bip)(a, aiger) for a in orig_args ]
+ simplified_funcs = [ pyabc_split.defer(run_bip)(a, simple_aiger[0]) for a in simplified_args ]
+
+ with pyabc_split.make_splitter() as splitter:
+
+ sleep_id = splitter.fork_one( lambda : time.sleep(timeout ) ) if timeout else None
+
+ ids = splitter.fork_all( orig_funcs )
+ kill_if_simplified = ids[1:]
+
+ simplifier_id = splitter.fork_one( pyabc_split.defer(simplify)(aiger, simple_aiger[0]) )
+
+ for id, res in splitter:
+
+ if id == sleep_id:
+ return False
+
+ if id == simplifier_id:
+ if not res:
+ continue
+ splitter.kill(kill_if_simplified)
+ splitter.fork_all( simplified_funcs )
+ continue
+
+ if report_result(res):
+ return True
+
+ return False
+
+def run_niklas_multi(aiger, simplify, report_result):
+
+ with open(aiger, 'r') as fin:
+ aig = read_aiger( fin )
+
+ n_j_pos = aig.n_justice()
+ assert n_j_pos > 0
+
+ if n_j_pos==1:
+ return run_niklas_single( aiger, simplify, report_result=lambda res: report_result(0, res) )
+
+ with temp_file_names(n_j_pos, suffix='.aig') as single_aiger:
+
+ def extract(j_po):
+ with open(single_aiger[j_po], 'w') as fout:
+ write_aiger(utils.extract_justice_po(aig, j_po), fout)
+
+ for _ in pyabc_split.split_all_full( [pyabc_split.defer(extract)(i) for i in xrange(n_j_pos) ] ):
+ pass
+
+ unsolved = set( xrange(n_j_pos) )
+
+ timeout = 1
+
+ while unsolved:
+ for j_po in sorted(unsolved):
+ if run_niklas_single( single_aiger[j_po], simplify, report_result=lambda res: report_result(j_po, res), timeout=timeout ):
+ unsolved.remove(j_po)
+ timeout *= 2
+
+ return not unsolved
+
+if __name__ == "__main__":
+
+ def simplify(aiger_in, aiger_out):
+
+ with temp_file_names(2, suffix='.aig') as tmp:
+
+ saved = utils.save_po_info(aiger_in, tmp[0])
+
+ pyabc.run_command( 'read_aiger %s'%tmp[0] )
+ pyabc.run_command( 'dc2 ; dc2 ; dc2 ; dc2' )
+ pyabc.run_command( 'write_aiger %s'%tmp[1] )
+
+ utils.restore_po_info( saved, tmp[1], aiger_out )
+
+ return True
+
+ def report_result(id, res):
+
+ if res and 'result' in res:
+ result = res['result']
+ if result=='proved':
+ print "PROVED: ", id
+ return True
+ elif result=='failed':
+ print "FAILED:", id
+ return True
+
+ return False
+
+ aiger = "test.aig"
+
+ while True:
+ try:
+ run_niklas_multi(aiger, simplify=simplify, report_result=report_result)
+ except:
+ import traceback
+ traceback.print_exc()
+
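Editor's note: parse_bip_status above expects a status file of "field: value" lines and keeps everything after the ": " separator; the only field this patch relies on is 'result', with values 'proved' or 'failed' (the other field names below are made up for illustration). A small self-contained illustration of the parsing convention:

    # mirrors the loop in parse_bip_status, but on an in-memory string
    status_text = "result: proved\nengine: treb-abs\ndepth: 42\n"

    res = {}
    for line in status_text.splitlines():
        line = line.strip()
        colon = line.find(':')
        if colon < 0:
            continue
        res[line[:colon]] = line[colon + 2:]

    # res == {'result': 'proved', 'engine': 'treb-abs', 'depth': '42'}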
diff --git a/scripts/par.py b/scripts/par.py
index c2f939ab..110c5677 100644
--- a/scripts/par.py
+++ b/scripts/par.py
@@ -11,7 +11,8 @@ import filecmp
global G_C,G_T,latches_before_abs,latches_before_pba,n_pos_before,x_factor,methods,last_winner
global last_cex,JV,JP, cex_list,max_bmc, last_cx, pord_on, trim_allowed, temp_dec, abs_ratio, ifbip
-global if_no_bip
+global if_no_bip, gabs, gla, sec_options,last_gasp_time, abs_ref_time, bmcs1, total_spec_refine_time
+global last_gap
"""
The functions that are currently available from module _abc are:
@@ -49,7 +50,7 @@ stackno_gabs = stackno_gore = stackno_greg= 0
STATUS_UNKNOWN = -1
STATUS_SAT = 0
STATUS_UNSAT = 1
-RESULT = ('SAT' , 'SAT', 'UNSAT', 'UNDECIDED', 'UNDECIDED,', 'ERROR' )
+RESULT = ('SAT', 'SAT', 'UNSAT', 'UNDECIDED', 'UNDECIDED', 'ERROR')
Sat = Sat_reg = 0
Sat_true = 1
Unsat = 2
@@ -75,37 +76,83 @@ sec_sw = False
sec_options = ''
cex_list = []
TERM = 'USL'
-ifbip = 0 # sets the abtraction method
-if_no_bip = 0 #True sets it up so it can use bip and reachx commands.
-abs_ratio = .5 #this controls when abstract stops
+last_gasp_time = 10000
+last_gasp_time = 500
+last_gasp_time = 900 #set to conform to hwmcc12
+use_pms = True
+
+#gabs = False #use the gate refinement method after vta
+#abs_time = 100
+
+####################################
+#default abstraction methods
+gabs = False #False = use gla refinement, True = use reg refinement.
+gla = True #use gla_abs instead of vta_abs
+##abs_time = 10000 #number of sec before initial abstraction terminates.
+abs_time = 150
+abs_time = 5000
+abs_time = 500
+abs_time = 100
+abs_ref_time = 50 #number of sec. allowed for abstraction refinement.
+total_spec_refine_time = 150
+ifbip = 0 # sets the abstraction method to vta or gla; if 1, then uses ,abs
+if_no_bip = False #True sets it up so it can't use bip and reachx commands.
+abs_ratio = .5 #this controls when abstraction is too big and gives up
+#####################################
+
+def abstr_a(t1=200,t2=200,absr=0):
+ global abs_time, abs_ref_time, abs_ratio
+ if not absr == 0:
+ abs_ratio_old = abs_ratio
+ abs_ratio = absr
+ abs_time = t1
+ abs_ref_time = t2
+ abstracta(False)
+ if not absr == 0:
+ abs_ratio = abs_ratio_old
+
t_init = 2 #initial time for poor man's concurrency.
-methods = ['PDR', 'INTRP', 'BMC',
- 'SIM', 'REACHX',
- 'PRE_SIMP', 'Simple_prove', 'PDRM', 'REACHM', 'BMC3','Min_Retime',
+
+def set_global(s=''):
+ global G_C,G_T,latches_before_abs,latches_before_pba,n_pos_before,x_factor,methods,last_winner
+ global last_cex,JV,JP, cex_list,max_bmc, last_cx, pord_on, trim_allowed, temp_dec, abs_ratio, ifbip
+ global if_no_bip, gabs, gla, sec_options,last_gasp_time,abs_ref_time, abs_time,use_pms
+ exec(s)
+
+
+methods = ['PDR', 'INTRP', 'BMC', 'SIM', 'REACHX',
+ 'PRE_SIMP', 'simple', 'PDRM', 'REACHM', 'BMC3','Min_Retime',
'For_Retime','REACHP','REACHN','PDR_sd','prove_part_2',
'prove_part_3','verify','sleep','PDRM_sd','prove_part_1',
'run_parallel','INTRPb', 'INTRPm', 'REACHY', 'REACHYc','RareSim','simplify', 'speculate',
- 'quick_sec', 'BMC_J', 'BMC2', 'extract -a', 'extract', 'PDRa']
+ 'quick_sec', 'BMC_J', 'BMC2', 'extract -a', 'extract', 'PDRa', 'par_scorr', 'dsat', 'iprove']
#'0.PDR', '1.INTERPOLATION', '2.BMC', '3.SIMULATION',
-#'4.REACHX', '5.PRE_SIMP', '6.SUPER_PROVE(2)', '7.PDRM', '8.REACHM', 9.BMC3'
+#'4.REACHX', '5.PRE_SIMP', '6.simple', '7.PDRM', '8.REACHM', 9.BMC3'
# 10. Min_ret, 11. For_ret, 12. REACHP, 13. REACHN 14. PDRseed 15.prove_part_2,
#16.prove_part_3, 17.verify, 18.sleep, 19.PDRMm, 20.prove_part_1,
#21.run_parallel, 22.INTRP_bwd, 23. Interp_m 24. REACHY 25. REACHYc 26. Rarity Sim 27. simplify
#28. speculate, 29. quick_sec, 30 bmc3 -S, 31. BMC2 32. extract -a 33. extract 34. pdr_abstract
+#35 par_scorr, 36. dsat, 37. iprove
win_list = [(0,.1),(1,.1),(2,.1),(3,.1),(4,.1),(5,-1),(6,-1),(7,.1)]
-FUNCS = ["(pyabc_split.defer(abc)('&get;,pdr -vt=%f'%t))",
- "(pyabc_split.defer(abc)('&get;,imc -vt=%f'%(t)))",
- "(pyabc_split.defer(abc)('&get;,bmc -vt=%f'%t))",
+FUNCS = ["(pyabc_split.defer(pdr)(t))",
+## "(pyabc_split.defer(abc)('&get;,pdr -vt=%f'%t))",
+ "(pyabc_split.defer(intrp)(t))",
+## "(pyabc_split.defer(abc)('&get;,imc -vt=%f'%(t)))",
+## "(pyabc_split.defer(abc)('&get;,imc-sofa -vt=%f'%(t)))",
+ "(pyabc_split.defer(bmc)(t))",
+## "(pyabc_split.defer(abc)('&get;,bmc -vt=%f'%t))",
"(pyabc_split.defer(simulate)(t))",
- "(pyabc_split.defer(abc)('reachx -t %d'%t))",
+ "(pyabc_split.defer(reachx)(t))",
+## "(pyabc_split.defer(abc)('reachx -t %d'%t))",
"(pyabc_split.defer(pre_simp)())",
## "(pyabc_split.defer(super_prove)(2))",
- "(pyabc_split.defer(simple)())",
+ "(pyabc_split.defer(simple)(t))",
"(pyabc_split.defer(pdrm)(t))",
"(pyabc_split.defer(abc)('&get;&reachm -vcs -T %d'%t))",
- "(pyabc_split.defer(abc)('bmc3 -C 1000000 -T %f'%t))",
- "(pyabc_split.defer(abc)('dr;&get;&lcorr;&dc2;&scorr;&put;dr'))",
- "(pyabc_split.defer(abc)('dr -m;&get;&lcorr;&dc2;&scorr;&put;dr'))",
+ "(pyabc_split.defer(bmc3)(t))",
+## "(pyabc_split.defer(abc)('bmc3 -C 1000000 -T %f'%t))",
+ "(pyabc_split.defer(abc)('dretime;&get;&lcorr;&dc2;&scorr;&put;dretime'))",
+ "(pyabc_split.defer(abc)('dretime -m;&get;&lcorr;&dc2;&scorr;&put;dretime'))",
"(pyabc_split.defer(abc)('&get;&reachp -vr -T %d'%t))",
"(pyabc_split.defer(abc)('&get;&reachn -vr -T %d'%t))",
## "(pyabc_split.defer(abc)('&get;,pdr -vt=%f -seed=521'%t))",
@@ -119,37 +166,51 @@ FUNCS = ["(pyabc_split.defer(abc)('&get;,pdr -vt=%f'%t))",
"(pyabc_split.defer(run_parallel)(JP,t,'TERM'))",
"(pyabc_split.defer(abc)('&get;,imc -bwd -vt=%f'%t))",
## "(pyabc_split.defer(abc)('int -C 1000000 -F 10000 -K 2 -T %f'%t))",
- "(pyabc_split.defer(abc)('int -C 1000000 -F 10000 -K 1 -T %f'%t))",
- "(pyabc_split.defer(abc)('&get;&reachy -v -T %d'%t))",
+ "(pyabc_split.defer(intrpm)(t))",
+## "(pyabc_split.defer(abc)('int -C 1000000 -F 10000 -K 1 -T %f'%t))",
+ "(pyabc_split.defer(reachy)(t))",
+## "(pyabc_split.defer(abc)('&get;&reachy -v -T %d'%t))",
"(pyabc_split.defer(abc)('&get;&reachy -cv -T %d'%t))",
"(pyabc_split.defer(simulate2)(t))",
"(pyabc_split.defer(simplify)())",
"(pyabc_split.defer(speculate)())",
"(pyabc_split.defer(quick_sec)(t))",
"(pyabc_split.defer(bmc_j)(t))",
- "(pyabc_split.defer(abc)('bmc2 -C 1000000 -T %f'%t))",
+## "(pyabc_split.defer(abc)('bmc2 -C 1000000 -T %f'%t))",
+ "(pyabc_split.defer(bmc2)(t))",
"(pyabc_split.defer(extractax)('a'))",
"(pyabc_split.defer(extractax)())",
"(pyabc_split.defer(pdra)(t))",
+ "(pyabc_split.defer(pscorr)(t))",
+ "(pyabc_split.defer(dsat)(t))",
+ "(pyabc_split.defer(iprove)(t))"
]
## "(pyabc_split.defer(abc)('bmc3 -C 1000000 -T %f -S %d'%(t,int(1.5*max_bmc))))"
#note: interp given 1/2 the time.
+
+## Similar engines below listed in the order of priority, high to low.
allreachs = [4,8,12,13,24,25]
+allreachs = [24,4]
reachs = [24]
##allpdrs = [14,7,34,19,0]
-allpdrs = [14,7,34,19]
-pdrs = [34,7,14]
-allbmcs = [2,9,30,31]
-exbmcs = [2,9]
+allpdrs = [34,7,14,19,0]
+allpdrs2 = [34,7,14,19,0]
+pdrs = [34,7,14,0]
+allbmcs = [9,30,2,31]
+exbmcs = [2,9,31]
bmcs = [9,30]
-allintrps = [1,22,23]
+bmcs1 = [9]
+allintrps = [23,1,22]
bestintrps = [23]
-intrps = [23]
-allsims = [3,26]
+##intrps = [23,1]
+intrps = [23,1] #putting ,imc-sofa first for now to test
+allsims = [26,3]
sims = [26]
allslps = [18]
slps = [18]
imc1 = [1]
+pre = [5]
+combs = [36,37]
JVprove = [7,23,4,24]
JV = pdrs+intrps+bmcs+sims #sets what is run in parallel '17. verify' above
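Editor's note: each integer in lists such as pdrs, bmcs or JV is an index into both the methods name list and the FUNCS list of deferred engine calls above, so the numbered comment block doubles as the legend for both. Helpers later in par.py race such a list in parallel; a minimal sketch of the idiom, with an illustrative 100-second budget (slps = [18] is the sleep entry, and the surrounding code treats a sleep win as Undecided):

    # race the PDR-family engines against a 100 s timer; first finisher wins
    res = run_parallel(slps + pdrs, 100)
    if res == Undecided:
        pass   # the timer won -- no engine produced an answer in time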
@@ -166,7 +227,9 @@ JP = JV + [27] # sets what is run in '21. run_parallel' above 27 simplify shoul
def initialize():
global xfi, max_bmc, last_time,j_last, seed, init_simp, K_backup, last_verify_time
global init_time, last_cex, last_winner, trim_allowed, t_init, sec_options, sec_sw
- global n_pos_before, n_pos_proved, last_cx, pord_on, temp_dec, abs_time
+ global n_pos_before, n_pos_proved, last_cx, pord_on, temp_dec, abs_time, gabs, gla,m_trace
+ global smp_trace,hist,init_initial_f_name, skip_spec, t_iter_start,last_simp, final_all, scorr_T_done
+ global last_gap
xfi = x_factor = 1 #set this to higher for larger problems or if you want to try harder during abstraction
max_bmc = -1
last_time = 0
@@ -183,30 +246,86 @@ def initialize():
t_init = 2 #this will start sweep time in find_cex_par to 2*t_init here
sec_sw = False
sec_options = ''
+ smp_trace = m_trace = []
cex_list = []
n_pos_before = n_pos()
n_pos_proved = 0
- abs_time = 10000
+ abs_time = 150
+ abs_ref_time = 50 #number of sec. allowed for abstraction refinement.
+ total_spec_refine_time = 150
+ abs_ratio = .5
+ hist = []
+ skip_spec = False
+ t_iter_start = 0
+ inf = 10000000
+ last_simp = [inf,inf,inf,inf]
+ final_all = 1
+ scorr_T_done = 0
+ last_gap = 0
+## abs_time = 100
+## gabs = False
+## abs_time = 500
+## gabs = True
+
+
def set_abs_method():
""" controls the way we do abstraction, 0 = no bip, 1 = old way, 2 use new bip and -dwr
see absab()
"""
- global ifbip
- print 'Set method of abstraction: \n0 = use vta and no bips, \n1 = old way, \nelse = use ,abs and -dwr'
+ global ifbip, abs_time,gabs,gla,if_no_bip
+ print 'current values ifbip = %d, abs_time = %d'%(ifbip,abs_time)
+ print 'Set method of abstraction: \n0 = vta for 500 and gla refin., \n1 = old way, \n2 = ,abs and -dwr, \n3 = vta for 100 followed by gla refine.,\n4 = vta for 500 then gla refine. but no bip methods gla refine., \n5 = gla and gla refine.'
s = raw_input()
s = remove_spaces(s)
- if s == '1':
+ if s == '1': #use the old way with ,abs but no dwr
ifbip = 1 #old way
- elif s == '0':
- ifbip = 0 #use vta and no bip
- else:
- ifbip = 2 #use ,abc -dwr
- print 'ifbip set to %d. Note engines are set only when read_file is done'%ifbip
+ abs_time = 100
+ if_no_bip = False
+ gabs = True
+ gla = False
+ elif s == '0':#use vta and gla refinement
+ ifbip = 0
+ abs_time = 500
+ if_no_bip = False
+ gabs = False
+ gla = False
+ elif s == '2': #use ,abc -dwr
+ ifbip = 2
+ abs_time = 100
+ if_no_bip = False
+ gabs = True #use register refinement
+ gla = False
+ elif s == '3': #use vta and gla refinement
+ ifbip = 0
+ abs_time = 100
+ if_no_bip = False
+ gabs = False
+ gla = False
+ elif s == '4': #use vta, gla refine. and no bip
+ ifbip = 0
+ abs_time = 100
+ if_no_bip = True
+ gabs = True
+ gla = False
+ elif s == '5': #use gla and gla_refinement
+ ifbip = 0
+ abs_time = 100
+ if_no_bip = False
+ gabs = False
+ gla = True
+ #should make any of the methods able to use no bip
+ print 'ifbip = %d, abs_time = %d, gabs = %d, if_no_bip = %d, gla = %d'%(ifbip,abs_time,gabs,if_no_bip,gla)
def ps():
print_circuit_stats()
+def iprove(t=100):
+ abc('iprove')
+
+def dsat(t=100):
+ abc('dsat')
+
def n_real_inputs():
"""This gives the number of 'real' inputs. This is determined by trimming away inputs that
have no connection to the logic. This is done by the ABC alias 'trm', which changes the current
@@ -228,6 +347,7 @@ def timer(t):
return time.clock() - btime
def sleep(t):
+## print 'Sleep time = %d'%t
time.sleep(t)
return Undecided
@@ -245,9 +365,9 @@ def abc_redirect_all( cmd ):
with redirect.redirect( redirect.null_file, sys.stderr ):
return run_command( cmd )
-def convert(t):
- t = int(t*100)
- return str(float(t)/100)
+##def convert(t):
+## t = int(t*100)
+## return str(float(t)/100)
def set_engines(N=0):
"""
@@ -255,10 +375,17 @@ def set_engines(N=0):
Sets the MC engines that are used in verification according to
if there are 4 or 8 processors. if if_no_bip = 1, we will not use any bip and reachx engines
"""
- global reachs,pdrs,sims,intrps,bmcs,n_proc,abs_ratio,ifbip
+ global reachs,pdrs,sims,intrps,bmcs,n_proc,abs_ratio,ifbip,bmcs1, if_no_bip, allpdrs,allbmcs
+ bmcs1 = [9] #BMC3
+ #for HWMCC we want to set N = 8
+ N = 8
if N == 0:
- N = n_proc = os.sysconf(os.sysconf_names["SC_NPROCESSORS_ONLN"])
- if N == 1:
+ N = n_proc = 1+os.sysconf(os.sysconf_names["SC_NPROCESSORS_ONLN"])
+## N = n_proc = 8 ### simulate 4 processors for HWMCC - turn this off a hwmcc.
+ else:
+ n_proc = N
+## print 'n_proc = %d'%n_proc
+ if N <= 1:
reachs = [24]
pdrs = [7]
## bmcs = [30]
@@ -266,33 +393,57 @@ def set_engines(N=0):
intrps = []
sims = []
slps = [18]
- elif N == 2:
+ elif N <= 2:
reachs = [24]
pdrs = [7]
bmcs = [30]
intrps = []
sims = []
slps = [18]
- elif N == 4:
- reachs = [24]
- pdrs = [34,7]
+ elif N <= 4:
+ reachs = [24] #reachy
+ pdrs = [7,34] #prdm pdr_abstract
if if_no_bip:
- allpdrs = pdrs = [7,19]
- bmcs = [9,30]
- intrps = [23]
- sims = []
- slps = [18]
- elif N == 8:
- reachs = [24]
- pdrs = [34,7,14]
- intrps = [23,1]
+ allpdrs = pdrs = [7,19] #pdrm pdrmm
+ bmcs = [9,30] #bmc3 bmc3 -S
+ intrps = [23] #Interp_m
+ sims = [26] #Rarity_sim
+ slps = [18] #sleep
+# 0.PDR, 1.INTERPOLATION, 2.BMC, 3.SIMULATION,
+# 4.REACHX, 5.PRE_SIMP, 6.simple, 7.PDRM, 8.REACHM, 9.BMC3
+# 10.Min_ret, 11.For_ret, 12.REACHP, 13.REACHN 14.PDRseed 15.prove_part_2,
+# 16.prove_part_3, 17.verify, 18.sleep, 19.PDRMm, 20.prove_part_1,
+# 21.run_parallel, 22.INTRP_bwd, 23.Interp_m 24.REACHY 25.REACHYc 26.Rarity Sim 27.simplify
+# 28.speculate, 29.quick_sec, 30.bmc3 -S, 31.BMC2 32.extract -a 33.extract 34.pdr_abstract
+# 35.par_scorr, 36.dsat, 37.iprove
+
+# BIPS = 0.PDR, 1.INTERPOLATION, 2.BMC, 14.PDRseed, 22.INTRP_bwd, 34.pdr_abstract
+# also reparam which uses ,reparam
+
+ elif N <= 8: #used for HWMCC
+ reachs = [24] #REACHY
+ allpdrs = pdrs = [7,34,14] #PDRM pdr_abstract PDR_seed
+ intrps = [23,1] #Interp_m
+ allbmcs = bmcs = [9,30,31] #BMC3 bmc3 -S
if if_no_bip:
- allpdrs = pdrs = [7,19]
- intrps = [23]
- bmcs = [9,30]
- sims = [26] #use default
- slps = [18]
-
+ allpdrs = pdrs = [7,19] #PDRM PDRMm
+ intrps = allintrps = [23] #Interp_m
+ bmcs = allbmcs = [2]
+ sims = [26] #Rarity_Sim
+ slps = [18] #sleep
+ else:
+ reachs = [24,4] #REACHY REACHX
+ pdrs = [7,34,14,19,0] #PDRM pdr_abstract PDR_seed PDRMm PDR
+ intrps = [23,1] #Interp_m INTERPOLATION
+ bmcs = allbmcs
+ if if_no_bip:
+ allpdrs = pdrs = [7,19] #PDRM PDRMm
+ intrps = allintrps = [23] #Interp_m
+ reachs = [24] #REACHY
+ bmcs = [9,30] #BMC3 bmc3 -S
+ sims = [26] #Rarity_Sim
+ slps = [18] #sleep
+
def set_globals():
"""This sets global parameters that are used to limit the resources used by all the operations
bmc, interpolation BDDs, abstract etc. There is a global factor 'x_factor' that can
@@ -349,6 +500,11 @@ def revert(f,n):
name = construct(l)
return name
+def n_eff_pos():
+ N=n_pos()
+ l=len(list_0_pos())
+ return N-l
+
def construct(l):
ll = l
name = ''
@@ -359,6 +515,17 @@ def construct(l):
def process_sat():
l = seq_name(f_name)
+def add_trace(s):
+ global m_trace
+ m_trace = m_trace + [s]
+
+def read_file_quiet_i(fname=None):
+ """ this preserves t_inter_start and is called internally by some functons."""
+ global t_iter_start
+ ts = t_iter_start
+ read_file_quiet(fname)
+ t_iter_start = ts
+
def read_file_quiet(fname=None):
"""This is the main program used for reading in a new circuit. The global file name is stored (f_name)
Sometimes we want to know the initial starting name. The file name can have the .aig extension left off
@@ -366,7 +533,7 @@ def read_file_quiet(fname=None):
Any time we want to process a new circuit, we should use this since otherwise we would not have the
correct f_name."""
global max_bmc, f_name, d_name, initial_f_name, x_factor, init_initial_f_name, win_list,seed, sec_options
- global win_list, init_simp, po_map
+ global win_list, init_simp, po_map, aigs, hist, init_initial_f_name
abc('fraig_restore') #clear out any residual fraig_store
set_engines() #temporary
init_simp = 1
@@ -393,22 +560,125 @@ def read_file_quiet(fname=None):
## run_command(s)
## print s
if s[-4:] == '.aig':
- run_command('&r %s;&put'%s) #warning: changes names to generic ones.
+## run_command('&r %s;&put'%s) #warning: changes names to generic ones.
+ run_command('r %s'%s)
+ run_command('zero')
else: #this is a blif file
run_command('r %s'%s)
abc('st;&get;&put') #changes names to generic ones for doing cec later.
run_command('zero;w %s.aig'%f_name)
set_globals()
+ hist = []
init_initial_f_name = initial_f_name = f_name
+ run_command('fold') #only does something if some of the outputs are constraints.
+ aigs_pp('push','initial')
+ #aigs = create push/pop history of aigs
+ #aigs.push() put the initial aig on the aig list.
print 'Initial f_name = %s'%f_name
- abc('addpi')
+ abc('addpi') #only does something if there are no PIs
#check_pos() #this removes constant outputs with a warning -
#needed when using iso. Need another fix for using iso.
- run_command('fold')
ps()
return
-
+
+def aigs_pp(op='push', typ='reparam'):
+ global hist,init_initial_f_name
+## print hist
+ if op == 'push':
+ hist.append(typ)
+ abc('w %s_aigs_%d.aig'%(init_initial_f_name,len(hist)))
+ if op == 'pop':
+ abc('cexsave') #protect current cex from a read
+ abc('r %s_aigs_%d.aig'%(init_initial_f_name,len(hist)))
+ abc('cexload')
+ typ = hist.pop()
+## print hist
+ return typ
+
+def scl():
+ abc('&get;&scl;&put')
+ ps()
+
+def cex_trim_g(F_init=0,tail=0,m=''):
+ abc('w %s_cex.aig'%f_name)
+ N=cex_frame()
+ G = N - tail
+ F = F_init
+ abc('cexsave')
+ while True:
+ print 'F = %d, G = %d'%(F,G)
+ abc('r %s_cex.aig'%f_name)
+ abc('cexload')
+ if m == '':
+ abc('cexcut -F %d -G %d'%(F,G))
+ else:
+ abc('cexcut -m -F %d -G %d'%(F,G))
+## abc('drw')
+## ps()
+ res = run_parallel(slps+bmcs,20)
+## run_command('bmc2 -v -T 20')
+## if is_sat(): #got a shortening of cex
+ if not res == Undecided:
+ Nb = cex_frame() #size of shortcut
+ abc('cexmerge -F %d -G %d'%(F,G))
+ abc('r %s_cex.aig'%f_name)
+ abc('cexload')
+ abc('testcex -a')
+ if cex_po() <0:
+ return 'ERROR2'
+ Nt=cex_frame() #current cex length
+ print 'Cex length reduced from %d to %d'%(N,Nt)
+ return
+ F = F + (G-F)/2
+## G = N - i*delta
+ if F >= G:
+ return
+
+def cex_trim(factor=1):
+ t_begin = time.time()
+ abc('w %s_cex.aig'%f_name)
+ N=cex_frame()
+ inc = min(N/10,100)
+ F = 0
+ G = inc
+ abc('cexsave')
+ abc('cexcut -n -F %d -G %d'%(F,G))
+ run_command('bmc2 -v -F %d -T 5'%(.9*inc))
+ inc = max(int(factor*n_bmc_frames()),2)
+ F = N - inc
+ G = N
+ print 'inc = %d'%inc
+ while True:
+ abc('r %s_cex.aig'%f_name)
+ abc('cexload')
+ abc('cexcut -n -F %d -G %d'%(F,G))
+## abc('drw')
+## ps()
+## run_command('bmc2 -v -F %d -T 20'%(.9*inc))
+ run_parallel(slps+bmcs,10)
+ if not is_sat():
+ abc('cex_load') #leave current cex in buffer
+ Nb = inc
+ else:
+ Nb = cex_frame() #size of shortcut
+ abc('cexmerge -F %d -G %d'%(F,G))
+ abc('r %s_cex.aig'%f_name)
+ abc('cexload')
+ abc('testcex -a')
+ if cex_po() <0:
+ return 'ERROR2'
+## abc('cexload')
+ Nt=cex_frame() #current cex length
+ print 'Cex length = %d'%Nt
+ G=F
+ F = max(0,F - inc)
+ print 'F = %d, G = %d'%(F,G)
+ if G <= 2:
+ abc('cexload')
+ print 'Time: %0.2f'%(time.time() - t_begin)
+ return
+
def read_file():
global win_list, init_simp, po_map
read_file_quiet()
@@ -420,6 +690,7 @@ def read_file():
def rf():
## set_engines(4) #temporary
read_file()
+ abc('zero')
def write_file(s):
"""this is the main method for writing the current circuit to an AIG file on disk.
@@ -444,7 +715,9 @@ def bmc_depth():
b = c-1
else:
b = n_bmc_frames()
- max_bmc = max(b,max_bmc)
+ if b > max_bmc:
+ max_bmc = b
+ report_bmc_depth(max_bmc)
return max_bmc
def null_status():
@@ -455,7 +728,12 @@ def set_max_bmc(b):
""" Keeps increasing max_bmc which is the maximum number of time frames for
which the current circuit is known to be UNSAT"""
global max_bmc
- max_bmc = max(b,max_bmc)
+ if b > max_bmc:
+ max_bmc = b
+ report_bmc_depth(max_bmc)
+
+def report_bmc_depth(m):
+ print 'u%d'%m
def print_circuit_stats():
"""Stardard way of outputting statistice about the current circuit"""
@@ -468,7 +746,8 @@ def print_circuit_stats():
if a == -1:
a = n_nodes()
s = 'Nodes'
- b = max(max_bmc,bmc_depth())
+## b = max(max_bmc,bmc_depth()) # don't want to do this because bmc_depth can change max_bmc
+ b = max_bmc
c = cex_frame()
if b>= 0:
if c>=0:
@@ -483,9 +762,6 @@ def print_circuit_stats():
else:
print 'PIs=%d,POs=%d,FF=%d,%s=%d'%(i,o,l,s,a)
-def q():
- exit()
-
def is_unsat():
if prob_status() == 1:
return True
@@ -504,7 +780,7 @@ def wc(file):
def rc(file):
"""reads <file> so that if constraints are explicit, it will preserve them"""
- abc('&r %s;&put'%file)
+ abc('&r -s %s;&put'%file)
#more complex functions: ________________________________________________________
#, abstract, pba, speculate, final_verify, dprove3
@@ -512,16 +788,16 @@ def rc(file):
def timer(s):
btime = time.clock()
abc(s)
- print 'time = %f'%(time.clock() - btime)
+ print 'time = %0.2f'%(time.clock() - btime)
def med_simp():
x = time.time()
- abc("&get;&scl;&dc2;&lcorr;&dc2;&scorr;&fraig;&dc2;&put;dr")
+ abc("&get;&scl;&dc2;&lcorr;&dc2;&scorr;&fraig;&dc2;&put;dretime")
#abc("dc2rs")
ps()
- print 'time = %f'%(time.time() - x)
+ print 'time = %0.2f'%(time.time() - x)
-def simplify():
+def simplify_old(M=0):
"""Our standard simplification of logic routine. What it does depende on the problem size.
For large problems, we use the &methods which use a simple circuit based SAT solver. Also problem
size dictates the level of k-step induction done in 'scorr' The stongest simplification is done if
@@ -531,34 +807,47 @@ def simplify():
abc('&get;&scl;&lcorr;&put')
p_40 = False
n =n_ands()
- if n >= 70000:
- abc('&get;&scorr -C 0;&put')
+ if n >= 70000 and not '_smp' in f_name:
+## abc('&get;&scorr -C 0;&put')
+ scorr_T(30)
+ ps()
n =n_ands()
if n >= 100000:
abc('&get;&scorr -k;&put')
- if (70000 < n and n < 100000):
+ ps()
+ if (70000 < n and n < 150000):
+## print '1'
p_40 = True
- abc("&get;&dc2;&put;dr;&get;&lcorr;&dc2;&put;dr;&get;&scorr;&fraig;&dc2;&put;dr")
+ abc("&get;&dc2;&put;dretime;&get;&lcorr;&dc2;&put;dretime;&get;&scorr;&fraig;&dc2;&put;dretime")
+## print 2'
+ ps()
n = n_ands()
## if n<60000:
if n < 80000:
abc("&get;&scorr -F 2;&put;dc2rs")
+ ps()
else: # n between 60K and 100K
abc("dc2rs")
+ ps()
n = n_ands()
## if (30000 < n and n <= 40000):
if (60000 < n and n <= 70000):
if not p_40:
- abc("&get;&dc2;&put;dr;&get;&lcorr;&dc2;&put;dr;&get;&scorr;&fraig;&dc2;&put;dr")
+ abc("&get;&dc2;&put;dretime;&get;&lcorr;&dc2;&put;dretime;&get;&scorr;&fraig;&dc2;&put;dretime")
abc("&get;&scorr -F 2;&put;dc2rs")
+ ps()
else:
- abc("dc2rs")
+ abc("dc2rs")
+ ps()
n = n_ands()
## if n <= 60000:
if n <= 70000:
- abc('scl -m;drw;dr;lcorr;drw;dr')
+ abc('scl -m;drw;dretime;lcorr;drw;dretime')
+ ps()
nn = max(1,n)
m = int(min( 70000/nn, 16))
+ if M > 0:
+ m = M
if m >= 1:
j = 1
while j <= m:
@@ -567,7 +856,80 @@ def simplify():
abc('dc2')
else:
abc('dc2rs')
- abc('scorr -C 5000 -F %d'%j)
+ abc('scorr -C 1000 -F %d'%j) #was 5000 temporarily 1000
+ if check_size():
+ break
+ j = 2*j
+ print 'ANDs=%d,'%n_ands(),
+ if n_ands() >= .98 * nands:
+ break
+ continue
+ if not check_size():
+ print '\n'
+ return get_status()
+
+def simplify(M=0,N=0):
+ """Our standard simplification of logic routine. What it does depende on the problem size.
+ For large problems, we use the &methods which use a simple circuit based SAT solver. Also problem
+ size dictates the level of k-step induction done in 'scorr' The stongest simplification is done if
+ n_ands < 20000. Then it used the clause based solver and k-step induction where |k| depends
+ on the problem size
+ Does not change #PIs.
+ """
+ global smp_trace
+ set_globals()
+ smp_trace = smp_trace + ['&scl;&lcorr']
+ abc('&get;&scl;&lcorr;&put')
+ p_40 = False
+ n =n_ands()
+ if N == 0 and n >= 70000 and not '_smp' in f_name:
+## abc('&get;&scorr -C 0;&put')
+## print 'Trying scorr_T'
+ scorr_T(30)
+ ps()
+ n =n_ands()
+ if n >= 100000:
+ smp_trace = smp_trace + ['&scorr']
+ abc('&get;&scorr -k;&put')
+ ps()
+ if (70000 < n and n < 150000):
+ p_40 = True
+ smp_trace = smp_trace + ['&dc2;dretime;&lcorr;&dc2;dretime;&scorr;&fraig;&dc2;dretime']
+ abc("&get;&dc2;&put;dretime;&get;&lcorr;&dc2;&put;dretime;&get;&scorr;&fraig;&dc2;&put;dretime")
+ ps()
+ n = n_ands()
+## if (30000 < n and n <= 40000):
+ if (60000 < n and n <= 70000):
+ if not p_40:
+ smp_trace = smp_trace + ['&dc2;dretime;&lcorr;&dc2;dretime;&scorr;&fraig;&dc2;dretime']
+ abc("&get;&dc2;&put;dretime;&get;&lcorr;&dc2;&put;dretime;&get;&scorr;&fraig;&dc2;&put;dretime")
+ smp_trace = smp_trace + ['&scorr -F 2;dc2rs']
+ abc("&get;&scorr -F 2;&put;dc2rs")
+ ps()
+ else:
+ abc("dc2rs")
+ smp_trace = smp_trace + ['dc2rs']
+ ps()
+ n = n_ands()
+## if n <= 60000:
+ if n <= 70000:
+ smp_trace = smp_trace + ['scl -m;drw;dretime;lcorr;drw;dretime']
+ abc('scl -m;drw;dretime;lcorr;drw;dretime')
+ ps()
+ nn = max(1,n)
+ m = int(min( 70000/nn, 16))
+ if M > 0:
+ m = M
+ if N == 0 and m >= 1:
+ j = 1
+ while j <= m:
+ set_size()
+ if j<8:
+ abc('dc2')
+ else:
+ abc('dc2rs')
+ smp_trace = smp_trace + ['scorr -F %d'%j]
+ abc('scorr -C 1000 -F %d'%j) #was 5000 temporarily 1000
if check_size():
break
j = 2*j
@@ -579,30 +941,32 @@ def simplify():
print '\n'
return get_status()
-def simulate2(t):
+def simulate2(t=900):
"""Does rarity simulation. Simulation is restricted by the amount
of memory it might use. At first wide but shallow simulation is done, followed by
successively more narrow but deeper simulation.
seed is globally initialized to 113 when a new design is read in"""
global x_factor, f_name, tme, seed
btime = time.clock()
+ tt = time.time()
diff = 0
while True:
- f = 100
- w = 16
+ f = 20
+ w = 64
b = 16
- r = 500
+ r = 700
for k in range(9): #this controls how deep we go
f = min(f*2, 3500)
w = max(((w+1)/2)-1,1)
abc('sim3 -F %d -W %d -N %d -R %d -B %d'%(f,w,seed,r,b))
seed = seed+23
if is_sat():
+## print 'RareSim time = %0.2f at frame %d'%((time.time() - tt),cex_frame())
return 'SAT'
if ((time.clock()-btime) > t):
return 'UNDECIDED'
-def simulate(t):
+def simulate(t=900):
abc('&get')
result = eq_simulate(t)
return result
@@ -624,7 +988,7 @@ def eq_simulate(t):
r = f/20
w = max(((w+1)/2)-1,1)
## abc('&sim3 -R %d -W %d -N %d'%(r,w,seed))
- abc('&sim -F %d -W %d -R %d'%(f,w,seed))
+ abc('&sim -F %d -W %d -N %d'%(f,w,seed))
seed = seed+23
if is_sat():
return 'SAT'
@@ -632,108 +996,159 @@ def eq_simulate(t):
return 'UNDECIDED'
def generate_abs(n):
- """generates an abstracted model (gabs) from the greg file. The gabs file is automatically
- generated in the & space by &abs_derive. We store it away using the f_name of the problem
+ """generates an abstracted model (gabs) from the greg file or gla. The gabs file is automatically
+ generated in the & space by &abs_derive or gla_derive. We store it away using the f_name of the problem
being solved at the moment. The f_name keeps changing with an extension given by the latest
operation done - e.g. smp, abs, spec, final, group. """
global f_name
- #we have a cex and we use this generate a new gabs file
- abc('&r %s_greg.aig; &abs_derive; &put; w %s_gabs.aig'%(f_name,f_name)) # do we still need the gabs file
+ #we have a cex and we use it to generate a new gabs (gla) file
+ if gabs: #use the register refinement method
+ abc('&r -s %s_greg.aig; &abs_derive; &put; w %s_gabs.aig'%(f_name,f_name)) # do we still need the gabs file
+ else: #use the gate refinement method
+ run_command('&r -s %s_gla.aig; &gla_derive; &put'%f_name)
+ if n_ands() < 2000:
+ run_command('scl;scorr;dretime')
+ run_command('w %s_gabs.aig'%f_name)
if n == 1:
#print 'New abstraction: ',
ps()
return
def refine_with_cex():
- """Refines the greg (which contains the original problem with the set of FF's that have been abstracted).
- This uses the current cex to modify the greg file to reflect which regs are in the
+ """Refines the greg or gla file (which contains the original problem with the set of FF's or gates
+ that have been abstracted).
+ This uses the current cex to modify the greg or gla file to reflect which regs (gates) are in the
new current abstraction"""
global f_name
- #print 'CEX in frame %d for output %d'%(cex_frame(),cex_po())
- #abc('&r %s_greg.aig; &abs_refine -t; &w %s_greg.aig'%(f_name,f_name))
- abc('&r %s_greg.aig;&w %s_greg_before.aig'%(f_name,f_name))
-## run_command('&abs_refine -s -M 25; &w %s_greg.aig'%f_name)
- run_command('&abs_refine -s; &w %s_greg.aig'%f_name)
- #print ' %d FF'%n_latches()
+ if gabs:
+ abc('&r -s %s_greg.aig;&w %s_greg_before.aig'%(f_name,f_name))
+ run_command('&abs_refine -s; &w %s_greg.aig'%f_name)
+ else:
+ run_command('&r -s %s_gla.aig;&w %s_gla_before.aig'%(f_name,f_name))
+ run_command('&gla_refine; &w %s_gla.aig'%f_name)
return
-def iter_tempor():
- na = n_ands()
+def refine_with_cex_suffix():
+ """Refines the greg or gla file (which contains the original problem with the set of FF's or gates
+ that have been abstracted).
+ This uses the current cex to modify the greg or gla file to reflect which regs(gates) are in the
+ new current abstraction"""
+ global f_name
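+ # currently disabled: the unconditional return below skips the suffix-based refinement code that follows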
+ return Undecided_no_reduction
+ t = 5
+ cexf = cex_frame()
+ suf = .9*cexf
+ run_command('write_status %s_temp.status'%f_name)
+ ub = int(cexf -min(10, .02*cexf))
+ lb = int(min(10,.02*cexf))
+ suf = int(.5*(ub-lb))
+ if_last = 0
+ N = 0
while True:
- abc('w save.aig')
- npi = n_pis()
- print npi
- abc('tempor -T 5 -F 8')
- print 'tempor = ',
- ps()
- x = trim()
-## if n_pis() > 2*npi:
-## abc('r save.aig')
-## return 'UNDECIDED'
- abc('dr')
- print 'retime = ',
- ps()
- simplify()
- trim()
- print 'simplify -> trim = ',
- ps()
- if n_ands() > na:
- abc('r save.aig')
- ps()
- print 'No improvement'
- return 'UNDECIDED'
- na = n_ands()
- ps()
- if n_latches() == 0:
- return RESULT[check_sat()]
-
+ N = N+1
+ tt = time.time()
+ run_command('read_status %s_temp.status'%f_name)
+ print 'Refining using suffix %d with time = %d'%(suf,t)
+ run_command('&r -s %s_gla.aig;&w %s_gla_before.aig'%(f_name,f_name))
+ F = create_funcs([18],t) #create a timer function with timeout = t
+ F = F + [eval('(pyabc_split.defer(abc)("&gla_refine -F %d; &w %s_gla.aig"))'%(suf,f_name))]
+ for i,res in pyabc_split.abc_split_all(F): #need to do a binary search
+ if i == 0: #timeout
+ lb = int(suf)
+ dec = 'increasing'
+ break
+ elif same_abs(): #suffix did not refine - need to decrease suf
+ ub = int(suf)
+ dec = 'decreasing'
+ break
+ else: #refinement happened
+ print 'refinement happened.'
+ return
+ print 'ub = %.2f, lb = %.2f, suf = %.2f'%(ub,lb,suf)
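+ # binary search on the suffix length: a timeout raises lb, an unchanged abstraction lowers ub; retry with the midpoint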
+ suf = int(lb+.5*(ub-lb))
+ if (ub-lb)< (max(1.1,min(10,.02*cexf))) or if_last or N >=4: # not refining in time allowed, give up
+ print '(ub-lb) = %0.2f'%(ub-lb)
+ print 'could not refine in resources allowed'
+ return Undecided_no_reduction
+
+def same_abs():
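+ """compares the abstraction derived from the current gla file against the stored gabs file; returns True if the sizes match, i.e. the refinement did not change the abstraction"""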
+ run_command('r %s_gabs.aig'%f_name)
+ set_size()
+## ps()
+ run_command('&r -s %s_gla.aig; &gla_derive; &put'%f_name)
+ if n_ands() < 2000:
+ run_command('scl;scorr;dretime')
+## ps()
+ return check_size()
def abstraction_refinement(latches_before,NBF,ratio=.75):
"""Subroutine of 'abstract' which does the refinement of the abstracted model,
- using counterexamples found by BMC or BDD reachability"""
+ using counterexamples found by BMC, BDD reachability, etc."""
global x_factor, f_name, last_verify_time, x, win_list, last_winner, last_cex, t_init, j_last, sweep_time
- global cex_list, last_cx
+ global cex_list, last_cx, abs_ref_time
sweep_time = 2
+ T1 = time.time()
if NBF == -1:
F = 2000
else:
F = 2*NBF
print '\nIterating abstraction refinement'
+ add_trace('abstraction refinement')
J = slps+intrps+pdrs+bmcs+sims
+ J=modify_methods(J)
print sublist(methods,J)
last_verify_time = t = x_factor*max(50,max(1,2.5*G_T))
- t = 1000 #temporary
+## t = 1000 #temporary
t = abs_time
initial_verify_time = last_verify_time = t
reg_verify = True
print 'Verify time set to %d'%last_verify_time
while True: #cex based refinement
- generate_abs(1) #generate new gabs file from refined greg file
+ generate_abs(1) #generate new gabs file from refined greg or gla file
set_globals()
latches_after = n_latches()
if small_abs(ratio):
+ print 'abstraction too large'
+ return Undecided_no_reduction
+ if (time.time() - T1)> abs_ref_time:
+ print 'abstraction time ran out'
break
t = last_verify_time
yy = time.time()
abc('w %s_beforerpm.aig'%f_name)
rep_change = reparam() #new - must do reconcile after to make cex compatible
+## if rep_change:
+## add_trace('reparam')
abc('w %s_afterrpm.aig'%f_name)
## if reg_verify:
status = verify(J,t)
+ print 'status = ',
+ print status
## else:
## status = pord_1_2(t)
###############
- if status == Sat_true:
+ if status[0] == Sat_true:
print 'Found true cex'
- reconcile(rep_change)
+ reconcile_a(rep_change)
+## add_trace('SAT by %s'%status[1])
return Sat_true
- if status == Unsat:
- return status
- if status == Sat:
+ if status[0] == Unsat:
+## add_trace('UNSAT by %s'%status[1])
+ return Unsat
+ if status[0] == Sat:
+## add_trace('SAT by %s'%status[1])
abc('write_status %s_after.status'%f_name)
- reconcile(rep_change) # makes the cex compatible with original before reparam and puts original in work space
+ reconcile_a(rep_change) # makes the cex compatible with original before reparam and puts original in work space
abc('write_status %s_before.status'%f_name)
- refine_with_cex()
+ if gabs: #global variable
+ refine_with_cex()
+ else:
+ result = refine_with_cex_suffix()
+ if result == Sat:
+ return Sat
+## result = refine_with_cex()
+ if result == Undecided_no_reduction:
+ return result
if is_sat(): # if cex can't refine, status is set to Sat_true
print 'Found true cex in output %d'%cex_po()
return Sat_true
@@ -746,8 +1161,9 @@ def abstraction_refinement(latches_before,NBF,ratio=.75):
def small_abs(ratio=.75):
""" tests is the abstraction is too large"""
- return ((rel_cost_t([pis_before_abs,latches_before_abs, ands_before_abs])> -.1)
- or (n_latches() >= ratio*latches_before_abs))
+## return ((rel_cost_t([pis_before_abs,latches_before_abs, ands_before_abs])> -.1)
+## or (n_latches() >= ratio*latches_before_abs))
+ return (n_latches() >= ratio*latches_before_abs)
##def abstract(if_bip=True):
## global ratio
@@ -793,7 +1209,7 @@ def abstractb():
NBF = bmc_depth()
print 'Abstraction good to %d frames'%max_bmc
#note when things are done in parallel, the &aig is not restored!!!
- abc('&r %s_greg.aig; &w initial_greg.aig; &abs_derive; &put; w initial_gabs.aig; w %s_gabs.aig'%(f_name,f_name))
+ abc('&r -s %s_greg.aig; &w initial_greg.aig; &abs_derive; &put; w initial_gabs.aig; w %s_gabs.aig'%(f_name,f_name))
set_max_bmc(NBF)
print 'Initial abstraction: ',
ps()
@@ -804,7 +1220,7 @@ def abstractb():
## (latches_after >= .75*latches_before_abs)):
if small_abs(abs_ratio):
abc('r %s_before_abs.aig'%f_name)
- print "Little reduction!"
+ print "Too little reduction!"
print 'Abstract time wasted = %0.2f'%(time.time()-tt)
return Undecided_no_reduction
sims_old = sims
@@ -816,9 +1232,9 @@ def abstractb():
## if n_latches() >= .90*latches_before_abs:
## if ((rel_cost_t([pis_before_abs, latches_before_abs, ands_before_abs])> -.1) or (latches_after >= .90*latches_before_abs)):
## if rel_cost_t([pis_before_abs,latches_before_abs, ands_before_abs])> -.1:
- if small_abs(abs_ratio): #r is ratio of final to initial latches in absstraction. If greater then True
+ if small_abs(abs_ratio) or result == Undecided_no_reduction: #abs_ratio bounds the ratio of final to initial latches in the abstraction; if exceeded, small_abs returns True
abc('r %s_before_abs.aig'%f_name) #restore original file before abstract.
- print "Little reduction! ",
+ print "Too little reduction! ",
print 'Abstract time wasted = %0.2f'%(time.time()-tt)
result = Undecided_no_reduction
return result
@@ -943,7 +1359,7 @@ def abs_m():
def n_abs_latches():
abc('&w pba_temp.aig') #save the &space
abc('&abs_derive;&put')
- abc('&r pba_temp.aig')
+ abc('&r -s pba_temp.aig')
return n_latches()
def pba_loop(F):
@@ -952,7 +1368,7 @@ def pba_loop(F):
run_command('&abs_pba -v -C 0 -F %d'%F)
abc('&w pba_temp.aig')
abc('&abs_derive;&put')
- abc('&r pba_temp.aig')
+ abc('&r -s pba_temp.aig')
N = n_latches()
## if n == N or n == N+1:
## break
@@ -991,13 +1407,13 @@ def ss(options=''):
if result > 2: #save the result and read in with /rf so that files are initialized correctly
if not '_spec' in f_name:
write_file('spec') #make sure we do not overwrite original file
- read_file_quiet('%s'%f_name) #this resets f_name and initial_f_name etc.
+ read_file_quiet_i('%s'%f_name) #this resets f_name and initial_f_name etc.
print '\n*************Executing super_prove ************'
print 'New f_name = %s'%f_name
result = sp()
- if result == 'SAT':
+ if result[0] == 'SAT':
result = 'UNDECIDED' #because speculation was done initially.
- elif result == 1:
+ elif result[0] == 1:
result = 'SAT'
else:
result = RESULT[result]
@@ -1025,13 +1441,13 @@ def pre_sec():
## trim_allowed = False
## print 'trim allowed = ',trim_allowed
print 'First file: ',
- read_file_quiet() #note - reads into & space and then does &put
+ read_file_quiet_i() #note - reads into & space and then does &put
ps()
prs(False)
ps()
abc('&w Old.aig')
print 'Second file: ',
- read_file_quiet()
+ read_file_quiet_i()
ps()
prs(False)
ps()
@@ -1078,7 +1494,7 @@ def sec(B_part,options):
result = RESULT[result]
else:
result = sp()
- if result == 'SAT':
+ if result[0] == 'SAT':
result = 'UNDECIDED'
print 'Total time = %d'%(time.time() - yy)
return result
@@ -1089,14 +1505,16 @@ def filter(opts):
""" This is for filter which effectively only recognizes options -f -g"""
if (opts == '' or opts == 'l'): #if 'l' this is used only for initial &equiv2 to get initial equiv creation
return
+ print 'filter = %s '%opts,
if opts == 'ab':
print A_name ,
print B_name
- run_command('&ps')
+## run_command('&ps')
run_command('&filter -f %s.aig %s.aig'%(A_name,B_name))
return
- if not opts == 'f':
- opts = 'g'
+#### if not opts == 'f':
+#### opts = 'g'
+## print 'filter = %
run_command('&filter -%s'%opts)
def check_if_spec_first():
@@ -1108,11 +1526,11 @@ def check_if_spec_first():
abc('&w check_and.aig')
abc("&get; &equiv3 -v -F 20 -T %f -R %d"%(t,5*r))
filter('g')
- abc("&srm; r gsrm.aig")
+ abc("&srm -A %s_gsrm.aig; r %s_gsrm.aig"%(f_name,f_name))
print 'Estimated # POs = %d for initial speculation'%n_pos()
result = n_pos() > max(50,.25*n_latches())
abc('r check_save.aig')
- abc('&r check_and.aig')
+ abc('&r -s check_and.aig')
return result
def initial_speculate(sec_opt=''):
@@ -1120,17 +1538,31 @@ def initial_speculate(sec_opt=''):
set_globals()
if sec_options == '':
sec_options = sec_opt
- t = max(1,.5*G_T)
+ # 1000 - 15, 5000 - 25, 10000 - 30, 50000 - 50
+ na = n_ands()
+## t = max(1,G_T)
+ if na < 1000:
+ t = 20
+ elif na < 5000:
+ t = 20 + ((na-1000)/4000)*20
+ elif na < 10000:
+ t = 40 + ((na-5000)/5000)*20
+ elif na < 50000:
+ t = 60 + ((na-40000)/40000)*15
+ else:
+ t = 75
r = max(1,int(t))
+ rounds = 30*r
print 'Initial sec_options = %s'%sec_options
- if sec_options == 'l':
- cmd = "&get; &equiv3 -lv -F 20 -T %f -R %d"%(3*t,15*r)
- else:
- cmd = "&get; &equiv3 -v -F 20 -T %f -R %d"%(3*t,15*r)
+## if sec_options == 'l':
+## cmd = "&get; &equiv3 -lv -F 20 -T %f -R %d -S %d"%(3*t,rounds,rounds/20)
+## else:
+## cmd = "&get; &equiv3 -v -F 20 -T %f -R %d -S %d"%(3*t,rounds,rounds/20)
+ cmd = "&get; &equiv3 -v -F 20 -T %d -R %d -S %d"%(int(t),0,0) #####XXX
print cmd
abc(cmd)
## print 'AND space after &equiv3: ',
- run_command('&ps')
+## run_command('&ps')
if (sec_options == 'l'):
if sec_sw:
sec_options = 'ab'
@@ -1140,6 +1572,7 @@ def initial_speculate(sec_opt=''):
## run_command('r %s.aig;ps'%A_name)
## print 'B_name: ',
## run_command('r %s.aig;ps'%B_name)
+ print 'filtering'
filter(sec_options)
abc('&w initial_gore.aig')
## print 'Running &srm'
@@ -1153,35 +1586,51 @@ def initial_speculate(sec_opt=''):
cmd = "&srm2 -%s %s.aig %s.aig; r gsrm.aig; w %s_gsrm.aig; &w %s_gore.aig"%(sec_options,A_name,B_name,f_name,f_name)
abc(cmd)
po_map = range(n_pos())
- return
+ return sec_options
else:
## abc('&r %s_gore.aig; &srm ; r gsrm.aig; w %s_gsrm.aig'%(f_name,f_name))
- cmd = "&srm; r gsrm.aig; w %s_gsrm.aig; &w %s_gore.aig"%(f_name,f_name)
-## print 'Running %s'%cmd
+ cmd = "&srm -A %s_gsrm.aig; r %s_gsrm.aig; &w %s_gore.aig"%(f_name,f_name,f_name)
+ print 'Running %s'%cmd
abc(cmd)
print 'done with &srm'
po_map = range(n_pos())
- if sec_options == '':
- if n_pos() > 200:
+ if sec_options == '' or sec_options == 'g':
+## if n_pos() > 10000:###temp
+ if n_eff_pos() > 1000: ##### Temporary
sec_options = 'g'
print 'sec_options set to %s'%'g'
- abc('&r %s_gore.aig'%f_name)
+ abc('&r -s %s_gore.aig'%f_name)
filter(sec_options)
- print 'Running &srm'
- cmd = "&srm; r gsrm.aig; ps;w %s_gsrm.aig;&ps; &w %s_gore.aig"%(f_name,f_name)
- print 'Running %s'%cmd
+## print 'Running &srm'
+ cmd = "&srm -A %s_gsrm.aig; r %s_gsrm.aig; &w %s_gore.aig"%(f_name,f_name,f_name)
+## print 'Running %s'%cmd
abc(cmd)
po_map = range(n_pos())
+ if n_eff_pos() > 500:
+## if n_pos() > 20000:####temp
+ sec_options = 'f'
+ print 'sec_options set to %s'%'f'
+ abc('&r -s %s_gore.aig'%f_name)
+ filter(sec_options)
+ print 'Running &srm'
+ cmd = "&srm -A %s_gsrm.aig; r %s_gsrm.aig; &w %s_gore.aig"%(f_name,f_name,f_name)
+ print 'Running %s'%cmd
+ abc(cmd)
+ po_map = range(n_pos())
+ return sec_options
+
+## if n_pos() > 2000:
+## return sec_options
+
-
def test_against_original():
'''tests whether we have a cex hitting an original PO'''
abc('&w %s_save.aig'%f_name) #we preserve whatever was in the & space
- abc('&r %s_gore.aig'%f_name) #This is the original
- abc('testcex')
+ abc('&r -s %s_gore.aig'%f_name) #This is the original
+ abc('testcex') #test the cex against the &space
PO = cex_po()
## print 'test_against original gives PO = %d'%PO
- abc('&r %s_save.aig'%f_name)
+ abc('&r -s %s_save.aig'%f_name)
if PO > -1:
## print 'cex fails an original PO'
return True
@@ -1211,28 +1660,40 @@ def set_cex_po(n=0):
else:
abc('testcex') #test &space
PO = cex_po()
-## print '2. cex PO = %d'%PO
+ print '2. cex PO = %d'%PO
cx = cex_get()
if PO > -1:
if test_against_original(): #this double checks that it is really an original PO
cex_put(cx)
-## print 'test_against_original was valid'
+ print 'test_against_original was valid'
return PO
else:
- print '1. PO not valid'
+ print '1. PO is not valid'
return -1 #error
if PO < 0 or PO >= (n_pos_before - n_pos_proved): # not a valid cex because already tested outside original.
+## print 'cex_po = %d, n_pos_before = %d, n_pos_proved = %d'%(PO, n_pos_before, n_pos_proved)
print '2. PO is not valid'
PO = -1 #error
## print '3. cex PO = %d'%PO
return PO
-def speculate():
+def cex_stats():
+ print 'cex_pis = %d, cex_regs = %d, cex_po = %d, cex_frame = %d'%(n_cex_pis(),n_cex_regs(),cex_po(),cex_frame())
+
+def speculate(t=0):
"""Main speculative reduction routine. Finds candidate sequential equivalences and refines them by simulation, BMC, or reachability
using any cex found. """
global G_C,G_T,n_pos_before, x_factor, n_latches_before, last_verify_time, trim_allowed, n_pos_before
global t_init, j_last, sec_sw, A_name, B_name, sec_options, po_map, sweep_time, sims, cex_list, n_pos_proved,ifpord1
- global last_cx
+ global last_cx, total_spec_refine_time, skip_spec
+## print 'sec_options = %s'%sec_options
+ if skip_spec:
+ return Undecided_no_reduction
+ add_trace('speculate')
+ if t > 0:
+ total_spec_refine_time = t
+ abc('scl') #make sure no dangling flops
+ abc('orpos')
last_cx = 0
ifpord1 = 1
initial_po_size = last_srm_po_size = n_pos()
@@ -1240,17 +1701,40 @@ def speculate():
if sec_sw:
print 'A_name = %s, B_name = %s, f_name = %s, sec_options = %s'%(A_name, B_name, f_name, sec_options)
elif n_ands()> 36000 and sec_options == '':
+## add_trace('sec options g')
sec_options = 'g'
print 'sec_options set to "g"'
+## add_trace('sec_options ="g"')
def refine_with_cex():
"""Refines the gore file to reflect equivalences that go away because of cex"""
global f_name
- abc('write_status %s_before.status'%f_name)
- abc('&r %s_gore.aig; &resim -m'%f_name)
+ abc('write_status %s_before_refine.status'%f_name)
+ abc('&r -s %s_gore.aig; &resim -m'%f_name)
+## run_command('&ps')
+## cex_stats()
filter(sec_options)
run_command('&w %s_gore.aig'%f_name)
return
+
+ def refine_without_cex(L=[]):
+ """removes the POs in the current SRM in the list L. Alters the equivalence classes in the
+ gore file accordingly.
+ """
+ global f_name
+ if L == []:
+ return
+ print 'Entered refine_without_cex'
+ abc('write_status %s_before_refine.status'%f_name)
+ create_abc_array(L)
+ print 'wrote array'
+ abc('&r -s %s_gore.aig; &equiv_filter'%f_name)
+ print 'filtered gore using L'
+ filter(sec_options)
+ print 'filtered with %s'%sec_options
+ run_command('&w %s_gore.aig'%f_name)
+ return
+
def set_cex(lst):
""" assumes only one in lst """
@@ -1261,7 +1745,48 @@ def speculate():
else:
cex_put(cx)
break
-
+ def retry(t):
+ add_trace('retrying')
+ print 'retrying winner cex which did not refine'
+ abc('r %s_gsrm_before.aig'%f_name) #restore previous gsrm
+ abc('w %s_beforerpm.aig'%f_name)
+ rep_change = reparam() #must be paired with reconcile below if cex
+ if rep_change:
+ add_trace('reparam')
+ abc('w %s_afterrpm.aig'%f_name)
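+ # re-run only the engine (last_winner) that produced the cex which failed to refine, giving it timeout t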
+ if last_winner == 'RareSim':
+ simulate2(t)
+ elif last_winner == 'PDR':
+ pdr(t)
+ elif last_winner == 'BMC':
+ bmc(t)
+ elif last_winner == 'INTRP':
+ intrp(t)
+ elif last_winner == 'PDRM':
+ pdrm(t)
+ elif last_winner == 'BMC3':
+ bmc3(t)
+ elif last_winner == 'PDR_sd':
+ pdrseed(t)
+ elif last_winner == 'PDRM_sd':
+ pdrmm(t)
+ elif last_winner == 'INTRPm':
+ intrpm(t)
+ elif last_winner == 'REACHY':
+ reachy(t)
+ elif last_winner == 'BMC_J':
+ bmc_j(t)
+ elif last_winner == 'PDRa':
+ pdra(t)
+ else:
+ reconcile(rep_change)
+ return False
+ reconcile(rep_change)
+ if not is_sat():
+ return False
+ abc('&r -s %s_gore_before.aig ;&w %s_gore.aig'%(f_name,f_name)) #restore old gore file
+ return True
+
def generate_srm():
"""generates a speculated reduced model (srm) from the gore file"""
global f_name, po_map, sec_sw, A_name, B_name, sec_options, n_pos_proved
@@ -1270,15 +1795,15 @@ def speculate():
ab = n_ands()
abc('w %s_oldsrm.aig'%f_name) #save for later purposes
if sec_sw:
- run_command('&r %s_gore.aig; &srm2 -%s %s.aig %s.aig; r gsrm.aig; w %s_gsrm.aig'%(f_name,sec_options,A_name,B_name,f_name))
+ run_command('&r -s %s_gore.aig; &srm2 -%s %s.aig %s.aig; r gsrm.aig; w %s_gsrm.aig'%(f_name,sec_options,A_name,B_name,f_name))
else:
- abc('&r %s_gore.aig; &srm ; r gsrm.aig; w %s_gsrm.aig'%(f_name,f_name)) #do we still need to write the gsrm file
+ abc('&r -s %s_gore.aig; &srm -A %s_gsrm.aig ; r %s_gsrm.aig'%(f_name,f_name,f_name)) #do we still need to write the gsrm file
## ps()
po_map = range(n_pos())
ps()
n_pos_proved = 0
return 'OK'
-
+
n_pos_before = n_pos()
n_pos_proved = 0
n_latches_before = n_latches()
@@ -1289,10 +1814,18 @@ def speculate():
j_last = 0
J = slps+sims+pdrs+bmcs+intrps
J = modify_methods(J,1)
- funcs = [eval('(pyabc_split.defer(initial_speculate)())')]
+ print 'sec_options = %s'%sec_options
+ funcs = [eval('(pyabc_split.defer(initial_speculate)("%s"))'%sec_options)]
funcs = create_funcs(J,10000)+funcs #want other functins to run until initial speculate stops
mtds = sublist(methods,J) + ['initial_speculate'] #important that initial_speculate goes last
- fork_last(funcs,mtds)
+ print mtds
+ res = fork_last(funcs,mtds)
+ print 'init_spec return = ',
+ print res
+ if res[1] in ['f','g','']:
+ sec_options = res[1]
+ add_trace('sec_options = %s'%sec_options)
+ add_trace('Number of POs: %d'%n_pos())
## ps()
if is_unsat():
return Unsat
@@ -1300,16 +1833,33 @@ def speculate():
return Sat_true
if n_pos_before == n_pos():
print 'No new outputs. Quitting speculate'
+ add_trace('de_speculate')
return Undecided_no_reduction # return result is unknown
-## cmd = 'lcorr;&get;&trim -i;&put;w %s_gsrm.aig'%f_name
- #print 'Executing %s'%cmd
- abc('w initial_gsrm.aig')
-## ps()
-## abc(cmd)
+ if n_eff_pos() > 1999000:
+ print 'Too many POs'
+ add_trace('de_speculate')
+ return Undecided_no_reduction
print 'Initial speculation: ',
ps()
+ abc('w %s_initial_gsrm.aig'%f_name)
+ if n_pos() > 1000:
+ print 'Too many new outputs. Quitting speculate'
+ add_trace('de_speculate')
+ return Undecided_no_reduction # return result is unknown
+ if n_pos() <= n_pos_before + 2:
+ print 'Too few new outputs. Quitting speculate'
+ add_trace('de_speculate')
+ return Undecided_no_reduction # return result is unknown
if n_latches() == 0:
return check_sat()
+ if use_pms:
+ p,q,r=par_multi_sat(0)
+ q = indices(r,1)
+ print sumsize(r)
+ if count_less(r,1) < .25*len(r):
+ print 'too many POs are already SAT'
+ add_trace('de_speculate')
+ return Undecided_no_reduction
if sec_options == 'l' and sec_sw:
sec_options = 'ab' #finished with initial speculate with the 'l' option
print "sec_options set to 'ab'"
@@ -1322,6 +1872,7 @@ def speculate():
if is_sat():
return Sat_true
simp_sw = init = True
+ add_trace('speculative refinement')
print '\nIterating speculation refinement'
sims_old = sims
sims = sims[:1]
@@ -1330,19 +1881,37 @@ def speculate():
## print sublist(methods,J)
t = max(50,max(1,2*G_T))
last_verify_time = t
+ ### temp
+ last_verify_time = total_spec_refine_time
+ ###
print 'Verify time set to %d'%last_verify_time
reg_verify = True
ref_time = time.time()
sweep_time = 2
ifpord1=1
- while True: # refinement loop
+ par_verify = re_try = False
+## total_spec_refine_time = 150
+ while True: ##################### refinement loop
set_globals()
yy = time.time()
+ time_used = (yy-ref_time)
+ print 'Time_used = %0.2f'%time_used
+ if time_used > total_spec_refine_time:
+ print 'Allotted speculation refinement time is exceeded'
+ add_trace('de_speculate')
+ return Undecided_no_reduction
if not init:
abc('r %s_gsrm.aig'%f_name) #this is done only to set the size of the previous gsrm.
abc('w %s_gsrm_before.aig'%f_name)
set_size()
result = generate_srm()
+ if n_pos() <= n_pos_before + 1: #heuristic: if only one equivalence remains, speculation is not worth continuing
+ abc('r %s.aig'%f_name) #revert to previous aig
+ sims = sims_old
+ print 'UNDECIDED'
+ print 'Refinement time = %0.2f'%(time.time() - ref_time)
+ add_trace('de_speculate')
+ return Undecided_no_reduction
last_srm_po_size = n_pos()
yy = time.time()
# if the size of the gsrm did not change after generating a new gsrm
@@ -1351,18 +1920,30 @@ def speculate():
if check_size(): #same size before and after
if check_cex(): #valid cex failed to refine possibly
if 0 <= cex_po() and cex_po() < (n_pos_before - n_pos_proved): #original PO
- print 'Found cex in original output = %d'%cex_po()
- print 'Refinement time = %s'%convert(time.time() - ref_time)
+ print 'Found cex in original output number = %d'%cex_po()
+ print 'Refinement time = %0.2f'%(time.time() - ref_time)
return Sat_true
elif check_same_gsrm(f_name): #if two gsrms are same, then failed to refine
print 'CEX failed to refine'
+ add_trace('de_speculate')
return Error
else:
print 'not a valid cex'
- return Error
+ print 'Last winner = %s'%last_winner
+ print 're_try = %d'%re_try
+ if re_try:
+ add_trace('de_speculate')
+ return Error #abort speculation
+ re_try = True
+ else:
+ re_try = False # just got a valid refinement so reset.
if n_latches() == 0:
- print 'Refinement time = %s'%convert(time.time() - ref_time)
- return check_sat()
+ print 'Number of latches reduced to 0'
+ print 'CEX refined incorrectly'
+ abc('r %s.aig'%f_name) #revert to previous aig
+ sims = sims_old
+ add_trace('de_speculate')
+ return Error
init = False # make it so that next time it is not the first time through
if not t == last_verify_time: # heuristic that if increased last verify time,
# then try pord_all
@@ -1370,45 +1951,89 @@ def speculate():
if reg_verify:
            t_init = (time.time() - yy)/2 #start poor man's concurrency at last cex time found
t_init = min(10,t_init)
-## temporary reg_verify = False #will cause pord_all to be used next
-## print 'pord_all turned on'
t = last_verify_time
print 'Verify time set to %d'%t
- abc('w %s_beforerpm.aig'%f_name)
- rep_change = reparam() #must be paired with reconcile below if cex
- abc('w %s_afterrpm.aig'%f_name)
- if reg_verify:
- result = verify(J,t)
+ if not re_try:
+## abc('w %s_beforerpm.aig'%f_name)
+## rep_change = reparam() #must be paired with reconcile below if cex
+#### if rep_change:
+#### add_trace('reparam')
+## abc('w %s_afterrpm.aig'%f_name)
+ rep_change = False #TEMP
+ if reg_verify:
+ if par_verify:
+ S,L_sat_POs,s = par_multi_sat(120)
+ L_sat_POs = indices(s,1)
+## L_sat_POs = L[1]
+ L=[]
+ for j in range(len(L_sat_POs)): #eliminate any of the original POs
+ if L_sat_POs[j] >= (n_pos_before-n_pos_proved):
+ L=L+[L_sat_POs[j]]
+ L_sat_POs = L
+ print L
+ if not L_sat_POs == []:
+ ress = [1,[['multi_sat']]]
+ add_trace(['multi_sat'])
+ else:
+ reg_verify = False
+ ress = pord_1_2(t)
+ add_trace(ress[1])
+ else:
+ ttt = time.time() #find time it takes to find a cex
+ ress = verify(J,t)
+ t_last_verify = time.time() - ttt
+ else:
+ ress = pord_1_2(t)
+## print ress
+ add_trace(ress[1])
+ result = ress[0]
+## add_trace(ress[1])
else:
- result = pord_1_2(t)
+ if not retry(100):
+ add_trace('de_speculate')
+ return Error
+ result = get_status()
+## print result
if result == Unsat:
+ add_trace('UNSAT by %s'%ress[1])
print 'UNSAT'
- print 'Refinement time = %s'%convert(time.time() - ref_time)
+ print 'Refinement time = %0.2f'%(time.time() - ref_time)
return Unsat
if result < Unsat:
+ abc('&r -s %s_gore.aig;&w %s_gore_before.aig'%(f_name,f_name)) #we are making sure that none of the original POs fail
+ if par_verify:
+ refine_without_cex(L_sat_POs)
+ print 'refining without cex done'
+ continue
if not reg_verify:
set_cex(cex_list)
-## if reg_verify:
- reconcile(rep_change) #end of pairing with reparam()
- assert (npi == n_cex_pis()),'ERROR: #pi = %d, #cex_pi = %d'%(npi,n_cex_pis())
- abc('&r %s_gore.aig;&w %s_gore_before.aig'%(f_name,f_name)) #we are making sure that none of the original POs fail
+## if not re_try:
+#### rec = reconcile(rep_change) #end of pairing with reparam()TEMP
+#### if rec == 'error':
+#### add_trace('de_speculate')
+#### return Error
+## assert (npi == n_cex_pis()),'ERROR: #pi = %d, #cex_pi = %d'%(npi,n_cex_pis())
+ abc('&r -s %s_gore.aig;&w %s_gore_before.aig'%(f_name,f_name)) #we are making sure that none of the original POs fail
if reg_verify:
PO = set_cex_po(0) #testing the regular space
else:
- abc('&r %s_gsrm.aig'%f_name)
+ abc('&r -s %s_gsrm.aig'%f_name)
PO = set_cex_po(1) # test against the &space.
- print 'cex_PO is %d'%PO
+ print 'cex_PO is %d, '%PO,
if (-1 < PO and PO < (n_pos_before-n_pos_proved)):
print 'Found cex in original output = %d'%cex_po()
- print 'Refinement time = %s'%convert(time.time() - ref_time)
+ print 'Refinement time = %0.2f'%(time.time() - ref_time)
return Sat_true
if PO == -1:
+ add_trace('de_speculate')
return Error
refine_with_cex() #change the number of equivalences
+ if not par_verify and t_last_verify > 2500:
+ par_verify = True #switch to finding many POs at a time
continue
elif (is_unsat() or n_pos() == 0):
print 'UNSAT'
- print 'Refinement time = %s'%convert(time.time() - ref_time)
+ print 'Refinement time = %0.2f'%(time.time() - ref_time)
return Unsat
else: #if undecided, record last verification time
print 'Refinement returned undecided in %d sec.'%t
@@ -1423,29 +2048,33 @@ def speculate():
abc('w %s_beforerpm.aig'%f_name)
rep_change = reparam() #must be paired with reconcile()below
abc('w %s_afterrpm.aig'%f_name)
- result = pord_1_2(t) #main call to verification
+ ress = pord_1_2(t) #main call to verification
+ print ress
+ result = ress[0]
+ add_trace(ress[1])
if result == Unsat:
print 'UNSAT'
- print 'Refinement time = %s'%convert(time.time() - ref_time)
+ print 'Refinement time = %0.2f'%(time.time() - ref_time)
return Unsat
- if is_sat():
- assert result == get_status(),'result: %d, status: %d'%(result,get_status())
+ if is_sat() or result == Sat:
+## assert result == get_status(),'result: %d, status: %d'%(result,get_status())
+ print 'result: %d, status: %d'%(result,get_status())
set_cex(cex_list)
- reconcile(rep_change)
- abc('&r %s_gsrm.aig'%f_name)
+ rec = reconcile(rep_change)
+ if rec == 'error':
+ add_trace('de_speculate')
+ return Error
+ abc('&r -s %s_gsrm.aig'%f_name)
PO = set_cex_po(1) #testing the & space
if (-1 < PO and PO < (n_pos_before-n_pos_proved)):
print 'Found cex in original output = %d'%cex_po()
- print 'Refinement time = %s'%convert(time.time() - ref_time)
+ print 'Refinement time = %0.2f'%(time.time() - ref_time)
return Sat_true
if PO == -1:
+ add_trace('de_speculate')
return Error
refine_with_cex() #change the number of equivalences
continue
- elif is_unsat():
- print 'UNSAT'
- print 'Refinement time = %s'%convert(time.time() - ref_time)
- return Unsat
else: #if undecided, record last verification time
last_verify_time = t
print 'UNDECIDED'
@@ -1455,72 +2084,69 @@ def speculate():
break
sims = sims_old
print 'UNDECIDED'
- print 'Refinement time = %s'%convert(time.time() - ref_time)
+ print 'Refinement time = %0.2f'%(time.time() - ref_time)
## if last_srm_po_size == initial_po_size: #essentially nothing happened. last_srm_po_size will be # POs in last srm.
if initial_sizes == [n_pis(),n_pos(),n_latches(),n_ands()]:
+ abc('r %s.aig'%f_name)
+ add_trace('de_speculate')
return Undecided_no_reduction #thus do not write spec file
else: #file was changed, so some speculation happened. If we find a cex later, need to know this.
write_file('spec')
return Undecided_reduction
-def simple_sat(t=10000):
+def simple_sat(t=900):
+ """
+ aimed at trying harder to prove SAT
+ """
y = time.time()
- J = [14,2,7,9,30,31,26,5] #5 is pre_simp
+ bmcs2 = [9,31]
+ bmcs2 = [9,30]
+ J = allbmcs+pdrs+sims+[5]
+## J = modify_methods(J)
+## J = [14,2,7,9,30,31,26,5] #5 is pre_simp
funcs = create_funcs(J,t)
mtds =sublist(methods,J)
+ print mtds
fork_last(funcs,mtds)
result = get_status()
if result > Unsat:
write_file('smp')
- result = verify(slps+[14,2,3,7,9,30,31,26],t)
+ result = verify(slps+allbmcs+pdrs+sims,t)
print 'Time for simple_sat = %0.2f'%(time.time()-y)
- return RESULT[result]
+ report_bmc_depth(max(max_bmc,n_bmc_frames()))
+ return [RESULT[result[0]]] + [result[1]]
-def simple(t=10000):
+def simple(t=10000,no_simp=0):
y = time.time()
-## J = [14,1,2,7,9,23,30,5] #5 is pre_simp
-## funcs = create_funcs(J,t)
-## mtds =sublist(methods,J)
-## fork_last(funcs,mtds)
-## result = get_status()
-## if result > Unsat:
-## write_file('smp')
-## J = slps+bmcs+pdrs+intrps
-## J = modify_methods(J)
-## result = verify(J,t)
- J = slps+bmcs+pdrs+intrps
+## pre_simp()
+ if not no_simp:
+ prove_part_1()
+ if is_sat():
+ return ['SAT']+['pre_simp']
+ if is_unsat():
+ return ['UNSAT']+['pre_simp']
+ if n_latches() == 0:
+ return [RESULT[check_sat()]]+['pre_simp']
+## J = slps+sims+bmcs+pdrs+intrps+pre
+ J = slps+sims+allbmcs+allpdrs+intrps
J = modify_methods(J)
result = verify(J,t)
-## print 'Time for simple = %0.2f'%(time.time()-y)
- return RESULT[result]
-
+## add_pord('%s by %s'%(result[0],result[1])
+ return [RESULT[result[0]]] + [result[1]]
def simple_bip(t=1000):
y = time.time()
- J = [14,1,2,30,5] #5 is pre_simp
+ J = [0,14,1,2,30,5] #5 is pre_simp
funcs = create_funcs(J,t)
mtds =sublist(methods,J)
fork_last(funcs,mtds)
result = get_status()
if result > Unsat:
write_file('smp')
- result = verify(slps+[14,1,2,30],t)
+ result = verify(slps+[0,14,1,2,30],t)
print 'Time for simple_bip = %0.2f'%(time.time()-y)
return RESULT[result]
-def simple_prove(t=1000):
- y = time.time()
- J = [7,9,23,30,5]
- funcs = create_funcs(J,t)
- mtds =sublist(methods,J)
- fork_last(funcs,mtds)
- result = get_status()
- if result > Unsat:
- write_file('smp')
- result = verify(slps+[7,9,23,30],t)
- print 'Time for simple_prove = %0.2f'%(time.time()-y)
- return RESULT[result]
-
def check_same_gsrm(f):
## return False #disable the temporarily until can figure out why this is there
"""checks gsrm miters before and after refinement and if equal there is an error"""
@@ -1541,11 +2167,11 @@ def check_cex():
""" check if the last cex still asserts one of the outputs.
If it does then we have an error"""
global f_name
- abc('read_status %s_before.status'%f_name)
- abc('&r %s_gsrm_before.aig'%f_name)
+ abc('read_status %s_before_refine.status'%f_name)
+ abc('&r -s %s_gsrm_before.aig'%f_name)
## abc('&r %s_gsrm.aig'%f_name)
- run_command('testcex')
- print 'cex po = %d'%cex_po()
+ run_command('testcex') #test the cex against the &-space aig.
+## print 'cex po = %d'%cex_po()
return cex_po() >=0
def set_size():
@@ -1577,28 +2203,28 @@ def inferior_size():
result = ((npi < n_pis()) or (npo < n_pos()) or (nands < n_ands()) )
return result
-def quick_verify(n):
- """Low resource version of final_verify n = 1 means to do an initial
- simplification first. Also more time is allocated if n =1"""
- global last_verify_time
- trim()
- if n == 1:
- simplify()
- if n_latches == 0:
- return check_sat()
- trim()
- if is_sat():
- return Sat_true
- #print 'After trimming: ',
- #ps()
- set_globals()
- last_verify_time = t = max(1,.4*G_T)
- if n == 1:
- last_verify_time = t = max(1,2*G_T)
- print 'Verify time set to %d '%last_verify_time
- J = [18] + intrps+bmcs+pdrs+sims
- status = verify(J,t)
- return status
+##def quick_verify(n):
+## """Low resource version of final_verify n = 1 means to do an initial
+## simplification first. Also more time is allocated if n =1"""
+## global last_verify_time
+## trim()
+## if n == 1:
+## simplify()
+## if n_latches == 0:
+## return check_sat()
+## trim()
+## if is_sat():
+## return Sat_true
+## #print 'After trimming: ',
+## #ps()
+## set_globals()
+## last_verify_time = t = max(1,.4*G_T)
+## if n == 1:
+## last_verify_time = t = max(1,2*G_T)
+## print 'Verify time set to %d '%last_verify_time
+## J = [18] + intrps+bmcs+pdrs+sims
+## status = verify(J,t)
+## return status
def process_status(status):
""" if there are no FF, the problem is combinational and we still have to check if UNSAT"""
@@ -1621,25 +2247,83 @@ def get_status():
status = Undecided
return status
-def reparam():
+def two_temp(t=20):
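+ """two rounds of temporal decomposition (tempor), each followed by sequential cleanup, rewriting and &rpm reparameterization, finishing with scorr"""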
+ tt = time.time()
+ abc('tempor;scl;drw;&get;&rpm;&put;tempor;scl;drw;&get;&rpm;&put;scorr')
+ print 'Time for two_temp = %.2f'%(time.time()-tt)
+ return get_status()
+
+def reparam_m():
"""eliminates PIs which if used in abstraction or speculation must be restored by
- reconcile and the cex made compatible with file beforerpm"""
-## return
- rep_change = False
+ reconcile, and the cex must be made compatible with the beforerpm file.
+ Uses the &-space.
+ """
+ abc('w %s_temp.aig'%f_name)
n = n_pis()
-## abc('w t1.aig')
+ t1 = time.time()
+## abc('&get;,reparam -aig=%s_rpm.aig; r %s_rpm.aig'%(f_name,f_name))
+ abc('&get;&rpm;&put')
+ tm = (time.time() - t1)
+ if n_pis() == 0:
+ print 'Number of PIs reduced to 0. Added a dummy PI'
+ abc('addpi')
+ nn = n_pis()
+ if nn < n:
+ print 'Reparam_m: PIs %d => %d, time = %.2f'%(n,nn,tm)
+ rep_change = True
+ else:
+ abc('r %s_temp.aig'%f_name)
+ rep_change = False
+ return rep_change
+
+def reparam_e():
+ """eliminates PIs which if used in abstraction or speculation must be restored by
+ reconcile and the cex made compatible with file beforerpm
+ Uses the &-space
+ """
+ abc('w %s_temp.aig'%f_name)
+ n = n_pis()
+ t1 = time.time()
abc('&get;,reparam -aig=%s_rpm.aig; r %s_rpm.aig'%(f_name,f_name))
-## abc('w t2.aig')
-## abc('testcex')
+## abc('&get;&rpm;&put')
+ tm =(time.time() - t1)
if n_pis() == 0:
print 'Number of PIs reduced to 0. Added a dummy PI'
abc('addpi')
nn = n_pis()
if nn < n:
- print 'Reparam: PIs %d => %d'%(n,nn)
+ print 'Reparam_e: PIs %d => %d, time = %.2f'%(n,nn,tm)
rep_change = True
+ else:
+ abc('r %s_temp.aig'%f_name)
+ rep_change = False
return rep_change
+def reparam():
+## abc('w %s_temp.aig'%f_name)
+## res = reparam_e()
+## res = reparam_m()
+ res = reparam_e()
+ return res
+
+##def try_and_rpm():
+## abc('w %s_temp.aig'%f_name)
+## n = n_pis()
+## t1 = time.time()
+## abc('&get;&rpm;&put')
+## print 'time &rpm = %.2f'%(time.time() - t1)
+## if n_pis() == 0:
+## print '&rpm: Number of PIs reduced to 0. Added a dummy PI'
+## abc('addpi')
+## nn = n_pis()
+## if nn < n:
+## print '&rpm: Reparam: PIs %d => %d'%(n,nn)
+#### rep_change = True
+## abc('r %s_temp.aig'%f_name)
+#### else:
+#### abc('r %s_temp.aig'%f_name)
+#### return False
+
def reconcile(rep_change):
"""used to make current cex compatible with file before reparam() was done.
However, the cex may have come
@@ -1650,19 +2334,41 @@ def reconcile(rep_change):
## print 'rep_change = %s'%rep_change
if rep_change == False:
return
- abc('&r %s_beforerpm.aig; &w tt_before.aig'%f_name)
+ abc('&r -s %s_beforerpm.aig; &w tt_before.aig'%f_name)
abc('write_status %s_after.status;write_status tt_after.status'%f_name)
- abc('&r %s_afterrpm.aig;&w tt_after.aig'%f_name)
+ abc('&r -s %s_afterrpm.aig;&w tt_after.aig'%f_name)
POa = set_cex_po(1) #this should set cex_po() to correct PO. A 1 here means it uses &space to check
abc('reconcile %s_beforerpm.aig %s_afterrpm.aig'%(f_name,f_name))
# reconcile modifies cex and restores work AIG to beforerpm
abc('write_status %s_before.status;write_status tt_before.status'%f_name)
- POb = set_cex_po()
+ POb = set_cex_po() #does not make sense if we are in abstraction refinement
if POa != POb:
- abc('&r %s_beforerpm.aig; &w tt_before.aig'%f_name)
- abc('&r %s_afterrpm.aig; &w tt_after.aig'%f_name)
+ abc('&r -s %s_beforerpm.aig; &w tt_before.aig'%f_name)
+ abc('&r -s %s_afterrpm.aig; &w tt_after.aig'%f_name)
print 'cex PO afterrpm = %d not = cex PO beforerpm = %d'%(POa,POb)
- assert POa > -1, 'cex did not assert any output'
+ if POa < 0: #'cex did not assert any output'
+ return 'error'
+
+def reconcile_a(rep_change):
+ """ This is the reconcile used in abstraction refinement
+ used to make current cex compatible with file before reparam() was done.
+ However, the cex may have come
+ from extracting a single output and verifying this.
+ Then the cex_po is 0 but the PO it fails could be anything.
+ So testcex rectifies this."""
+ global n_pos_before, n_pos_proved
+## print 'rep_change = %s'%rep_change
+ if rep_change == False:
+ return
+ abc('&r -s %s_beforerpm.aig; &w tt_before.aig'%f_name)
+ abc('write_status %s_after.status;write_status tt_after.status'%f_name)
+ abc('&r -s %s_afterrpm.aig;&w tt_after.aig'%f_name)
+ POa = set_cex_po(1) #this should set cex_po() to correct PO. A 1 here means it uses &space to check
+ abc('reconcile %s_beforerpm.aig %s_afterrpm.aig'%(f_name,f_name))
+ # reconcile modifies cex and restores work AIG to beforerpm
+ abc('write_status %s_before.status;write_status tt_before.status'%f_name)
+ if POa < 0: #'cex did not assert any output'
+ return 'error'
def reconcile_all(lst, rep_change):
"""reconciles the list of cex's"""
@@ -1680,34 +2386,34 @@ def reconcile_all(lst, rep_change):
return list
-def try_rpm():
- """rpm is a cheap way of doing reparameterization and is an abstraction method, so may introduce false cex's.
- It finds a minimum cut between the PIs and the main sequential logic and replaces this cut by free inputs.
- A quick BMC is then done, and if no cex is found, we assume the abstraction is valid. Otherwise we revert back
- to the original problem before rpm was tried."""
- global x_factor
- if n_ands() > 30000:
- return
- set_globals()
- pis_before = n_pis()
- abc('w %s_savetemp.aig'%f_name)
- abc('rpm')
- result = 0
- if n_pis() < .5*pis_before:
- bmc_before = bmc_depth()
- #print 'running quick bmc to see if rpm is OK'
- t = max(1,.1*G_T)
- #abc('bmc3 -C %d, -T %f'%(.1*G_C, t))
- abc('&get;,bmc -vt=%f'%t)
- if is_sat(): #rpm made it sat by bmc test, so undo rpm
- abc('r %s_savetemp.aig'%f_name)
- else:
- trim()
- print 'WARNING: rpm reduced PIs to %d. May make SAT.'%n_pis()
- result = 1
- else:
- abc('r %s_savetemp.aig'%f_name)
- return result
+##def try_rpm():
+## """rpm is a cheap way of doing reparameterization and is an abstraction method, so may introduce false cex's.
+## It finds a minimum cut between the PIs and the main sequential logic and replaces this cut by free inputs.
+## A quick BMC is then done, and if no cex is found, we assume the abstraction is valid. Otherwise we revert back
+## to the original problem before rpm was tried."""
+## global x_factor
+## if n_ands() > 30000:
+## return
+## set_globals()
+## pis_before = n_pis()
+## abc('w %s_savetemp.aig'%f_name)
+## abc('rpm')
+## result = 0
+## if n_pis() < .5*pis_before:
+## bmc_before = bmc_depth()
+## #print 'running quick bmc to see if rpm is OK'
+## t = max(1,.1*G_T)
+## #abc('bmc3 -C %d, -T %f'%(.1*G_C, t))
+## abc('&get;,bmc -vt=%f'%t)
+## if is_sat(): #rpm made it sat by bmc test, so undo rpm
+## abc('r %s_savetemp.aig'%f_name)
+## else:
+## trim()
+## print 'WARNING: rpm reduced PIs to %d. May make SAT.'%n_pis()
+## result = 1
+## else:
+## abc('r %s_savetemp.aig'%f_name)
+## return result
def verify(J,t):
"""This method is used for finding a cex during refinement, but can also
@@ -1722,7 +2428,9 @@ def verify(J,t):
#print J,t
F = create_funcs(J,t)
(m,result) = fork_break(F,mtds,'US') #FORK here
- assert result == get_status(),'result: %d, status: %d'%(result,get_status())
+## result = fork_break(F,mtds,'US') #FORK here
+ print result
+## assert result[0] == get_status(),'result: %d, status: %d'%(result[0],get_status())
return result
def dsat_all(t=100,c=100000):
@@ -1735,7 +2443,7 @@ def dsat_all(t=100,c=100000):
abc('w %s_temp.aig'%f_name)
for j in J:
tt = time.time()
- abc('r %s_temp.aig'%f_name)
+ abc('r %s.aig'%f_name)
run_command('cone -O %d; dc2; dsat -C %d'%(j,c))
if is_unsat():
print 'Output %d is %s'%(j,RESULT[2]),
@@ -1747,15 +2455,23 @@ def dsat_all(t=100,c=100000):
break
print 'Total time = %0.2f'%(time.time() - ttt)
-def check_sat():
+def check_sat(t=900):
"""This is called if all the FF have disappeared, but there is still some logic left. In this case,
    the remaining logic may be UNSAT, which is usually the case, but this has to be proved. The ABC command 'dsat' is used for combinational problems"""
+ global smp_trace
if not n_latches() == 0:
print 'circuit is not combinational'
return Undecided
## print 'Circuit is combinational - checking with dsat'
abc('&get') #save the current circuit
- abc('orpos;dsat -C %d'%G_C)
+ abc('orpos')
+ J = combs+slps
+ mtds = sublist(methods,J)
+## print mtds
+ F = create_funcs(J,t)
+ (m,result) = fork_last(F,mtds) #FORK here
+## print '%s: '%mtds[m],
+## smp_trace = smp_trace + ['%s'%mtds[m]]
if is_sat():
abc('&put')
if n_pos() == 1:
@@ -1790,55 +2506,6 @@ def try_induction(C):
return Undecided_reduction
print 'Induction succeeded'
return Unsat
-
-##def final_verify_recur(K):
-## """During prove we make backups as we go. These backups have increasing abstractions done, which can cause
-## non-verification by allowing false counterexamples. If an abstraction fails with a cex, we can back up to
-## the previous design before the last abstraction and try to proceed from there. K is the backup number we
-## start with and this decreases as the backups fails. For each backup, we just try final_verify.
-## If ever we back up to 0, which is the backup just after simplify, we then try speculate on this. This often works
-## well if the problem is a SEC problem where there are a lot of equivalences across the two designs."""
-## global last_verify_time
-## #print 'Proving final_verify_recur(%d)'%K
-## last_verify_time = 2*last_verify_time
-## print 'Verify time increased to %d'%last_verify_time
-## for j in range(K):
-## i = K-(j+1)
-## abc('r %s_backup_%d.aig'%(initial_f_name,i))
-## if ((i == 0) or (i ==2)): #don't try final verify on original last one
-## status = prob_status()
-## break
-## print '\nVerifying backup number %d:'%i,
-## #abc('r %s_backup_%d.aig'%(initial_f_name,i))
-## ps()
-## #J = [18,0,1,2,3,7,14]
-## J = slps+sims+intrps+bmcs+pdrs
-## t = last_verify_time
-## status = verify(J,t)
-## if status >= Unsat:
-## return status
-## if i > 0:
-## print 'SAT returned, Running less abstract backup'
-## continue
-## break
-## if ((i == 0) and (status > Unsat) and (n_ands() > 0)):
-## print '\n***Running speculate on initial backup number %d:'%i,
-## abc('r %s_backup_%d.aig'%(initial_f_name,i))
-## ps()
-## if n_ands() < 20000:
-#### pre_simp()
-## status = speculate()
-## if ((status <= Unsat) or (status == Error)):
-## return status
-## #J = [18,0,1,2,3,7,14]
-## J = slps+sims+intrps+bmcs+pdrs
-## t = 2*last_verify_time
-## print 'Verify time increased to %d'%last_verify_time
-## status = verify(J,t)
-## if status == Unsat:
-## return status
-## else:
-## return Undecided_reduction
def smp():
abc('smp')
@@ -1848,118 +2515,140 @@ def dprove():
abc('dprove -cbjupr')
def trim():
- global trim_allowed
+ global trim_allowed,smp_trace
if not trim_allowed:
- return
-## abc('trm;addpi')
- reparam()
-## print 'exiting trim'
+ return False
+ result = reparam()
+ return result
def prs(x=True):
- global trim_allowed
+ global trim_allowed, smp_trace
""" If x is set to False, no reparameterization will be done in pre_simp"""
+ global smp_trace
+ smp_trace = []
trim_allowed = x
print 'trim_allowed = ',trim_allowed
- y = time.clock()
- pre_simp()
- print 'Time = %s'%convert(time.clock() - y)
+ y = time.time()
+ result = pre_simp()
+ print 'Time = %0.2f'%(time.time() - y)
write_file('smp')
+ return RESULT[result[0]]
-def pre_simp():
- """This uses a set of simplification algorithms which preprocesses a design.
- Includes forward retiming, quick simp, signal correspondence with constraints, trimming away
- PIs, and strong simplify"""
- global trim_allowed, temp_dec
- tt = time.time()
- set_globals()
- abc('&get; &scl; &put')
- if (n_ands() > 200000 or n_latches() > 50000 or n_pis() > 40000):
- print 'Problem too large, simplification skipped'
- return 'Undecided'
- if ((n_ands() > 0) or (n_latches()>0)):
- trim()
-## ps()
- if n_latches() == 0:
- return check_sat()
-## if n_ands()<70000:
-## abs('scorr -C 5000')
- best_fwrd_min([10,11])
- ps()
- status = try_scorr_constr()
- if ((n_ands() > 0) or (n_latches()>0)):
- trim()
- if n_latches() == 0:
- return check_sat()
- status = process_status(status)
- if status <= Unsat:
- return status
- simplify()
- print 'Simplify: ',
- ps()
- if n_latches() == 0:
- return check_sat()
- if trim_allowed:
- t = min(15,.3*G_T)
- if not '_smp' in f_name: #try this only once on a design
- try_temps(15)
- if n_latches() == 0:
- return check_sat()
- try_phase()
- if n_latches() == 0:
- return check_sat()
- if ((n_ands() > 0) or (n_latches()>0)):
- trim()
- status = process_status(status)
- print 'Simplification time = %0.2f'%(time.time()-tt)
- return status
+def check_push():
+ """save the current aig if it has a different number of latches from last aig on lst"""
+ result = False
+ n = n_latches()
+## ps()
+ abc('&get;cexsave') #save the current aig
+## typ = hist[-1:]
+## print hist
+ run_command('r %s_aigs_%d.aig'%(init_initial_f_name,len(hist)))
+## typ = aigs_pp('pop')
+## aigs.pop() #check latches in last aig.
+ nn = n_latches()
+## ps()
+## aigs.push() # put back last aig.
+## aigs_pp('push',typ)
+ abc('&put;cexload') # restore current aig
+## print 'check_push: current n=%d, previous nn=%d'%(n,nn)
+ if not n == nn: #if number of latches changes need to push the current aig so that reconcile can work.
+## aigs.push()
+## print 'n /= nn'
+ aigs_pp('push','reparam0') #default is push,reparam
+ result = True
+ return result
+
+def dump():
+ """ get rid of the last aig on the list"""
+ abc('&get')
+## aigs.pop()
+ aigs_pp('pop')
+ abc('&put')
-def pre_simp2():
+def test_no_simp():
+ global last_simp
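+ # compare current PI/PO/latch/AND counts against those recorded after the last simplification; if none shrank by at least 5%, skip re-simplification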
+ ri = float(n_pis())/float(last_simp[0])
+ ro = float(n_pos())/float(last_simp[1])
+ rl = float(n_latches())/float(last_simp[2])
+ ra = float(n_ands())/float(last_simp[3])
+ val = min(ri,ro,rl,ra)
+ if val < .95:
+ print 'simplification worthwhile'
+ return False
+ print 'simplification not worthwhile'
+ return True
+
+def pre_simp(n=0,N=0):
"""This uses a set of simplification algorithms which preprocesses a design.
Includes forward retiming, quick simp, signal correspondence with constraints, trimming away
- PIs, and strong simplify"""
+ PIs, and strong simplify. If n is not 0, phase abstraction is not done."""
global trim_allowed, temp_dec
- tt = time.time()
- set_globals()
- abc('&get; &scl; &put')
- if (n_ands() > 200000 or n_latches() > 50000 or n_pis() > 40000):
- print 'Problem too large, simplification skipped'
- return 'Undecided'
- if ((n_ands() > 0) or (n_latches()>0)):
- trim()
-## ps()
- if n_latches() == 0:
- return check_sat()
- if n_ands()<70000:
- abc('scorr -C 5000')
- best_fwrd_min([10,11])
- ps()
- status = try_scorr_constr()
- if ((n_ands() > 0) or (n_latches()>0)):
- trim()
- if n_latches() == 0:
- return check_sat()
- status = process_status(status)
- if status <= Unsat:
- return status
- simplify()
- print 'Simplify: ',
- ps()
- if n_latches() == 0:
- return check_sat()
- if trim_allowed:
- t = min(15,.3*G_T)
- if not '_smp' in f_name: #try this only once on a design
- try_temps(15)
- if n_latches() == 0:
- return check_sat()
- try_phase()
- if n_latches() == 0:
- return check_sat()
+ global smp_trace, aigs, last_simp
+ chk_sat = 0
+ smp_trace = []
+ while True:
+ if n_latches() == 0:
+ print 'Circuit is combinational'
+ chk_sat = 1
+ break
+ if test_no_simp():
+ break
+ ttime = time.time()
+ set_globals()
+ smp_trace = smp_trace + ['&scl']
+ abc('&get; &scl; &put')
+ if (n_ands() > 200000 or n_latches() > 50000 or n_pis() > 40000):
+ smp_trace = smp_trace + ['scorr_T']
+ scorr_T(50)
+ ps()
if ((n_ands() > 0) or (n_latches()>0)):
- trim()
- status = process_status(status)
- print 'Simplification time = %0.2f'%(time.time()-tt)
- return status
+ res = a_trim()
+ if n_latches() == 0:
+ break
+ status = get_status()
+ if (n == 0 and (not '_smp' in f_name) or '_cone' in f_name):
+ best_fwrd_min([10,11])
+ ps()
+ status = try_scorr_constr()
+ if ((n_ands() > 0) or (n_latches()>0)):
+ res = a_trim()
+ if n_latches() == 0:
+ break
+ status = process_status(status)
+ if status <= Unsat:
+ last_simp = [n_pis(),n_pos(),n_latches(),n_ands()]
+ return [status,smp_trace,hist]
+ print 'Starting simplify ',
+ simplify(n,N)
+ print 'Simplify: ',
+ ps()
+ if n_latches() == 0:
+ break
+ if trim_allowed and n == 0:
+ t = min(15,.3*G_T)
+ if (not '_smp' in f_name) or '_cone' in f_name: #try this only once on a design
+ tt = 25
+ if n_ands() > 500000:
+ tt = 30
+ res,F = try_temps(tt)
+ if res:
+ aigs_pp('push','tempor')
+ if n_latches() == 0:
+ break
+ if n == 0:
+ res,N = try_phases()
+ if res:
+ aigs_pp('push','phase')
+ if n_latches() == 0:
+ break
+ if ((n_ands() > 0) or (n_latches()>0)):
+ res = a_trim()
+ status = process_status(status)
+ print 'Simplification time = %0.2f'%(time.time()-ttime)
+ last_simp = [n_pis(),n_pos(),n_latches(),n_ands()]
+ return [status, smp_trace,hist]
+ last_simp = [n_pis(),n_pos(),n_latches(),n_ands()]
+ return [check_sat(),smp_trace,hist]
def try_scorr_constr():
@@ -1996,7 +2685,7 @@ def ok_phases(n):
""" only try those where the resulting n_ands does not exceed 60000"""
f = factors(n)
sp = subproducts(f)
- s = map(lambda m:m*n_ands()< 90000,sp)
+ s = map(lambda m:m*n_ands()< 120000,sp)
z = select(s,sp)
return z
@@ -2029,18 +2718,32 @@ def product(ll):
def drop(i,ll):
return ll[:i]+ll[i+1:]
+def try_phases():
+#### print 'entered try_phases ',
+## ps()
+ no = n_pos()
+ res = try_phase()
+## print 'after try_phase ',
+## ps()
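+ # N is the factor by which the number of POs grew under phase abstraction (roughly the number of unrolled phases)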
+ N = n_pos()/no
+ if N > 1:
+ res = True
+ else:
+ res = False
+ return res,N
+
def try_phase():
"""Tries phase abstraction. ABC returns the maximum clock phase it found using n_phases.
Then unnrolling is tried up to that phase and the unrolled model is quickly
simplified (with retiming to see if there is a significant reduction.
If not, then revert back to original"""
- global init_simp
- trim()
+ global init_simp, smp_trace,aigs
n = n_phases()
## if ((n == 1) or (n_ands() > 45000) or init_simp == 0):
- if ((n == 1) or (n_ands() > 45000)):
- return
+ if ((n == 1) or (n_ands() > 60000)):
+ return False
## init_simp = 0
+ res = a_trim()
print 'Trying phase abstraction - Max phase = %d'%n,
abc('w %s_phase_temp.aig'%f_name)
na = n_ands()
@@ -2050,7 +2753,7 @@ def try_phase():
z = ok_phases(n)
print z,
if len(z) == 1:
- return
+ return False
#p = choose_phase()
p = z[1]
abc('phase -F %d'%p)
@@ -2058,7 +2761,7 @@ def try_phase():
print 'Phase %d is incompatible'%p
abc('r %s_phase_temp.aig'%f_name)
if len(z)< 3:
- return
+ return False
else:
p = z[2]
#print 'Trying phase = %d: '%p,
@@ -2066,37 +2769,47 @@ def try_phase():
if no == n_pos(): #nothing happened because p is not mod period
print 'Phase %d is incompatible'%p
abc('r %s_phase_temp.aig'%f_name)
- return
+ return False
+ else:
+ smp_trace = smp_trace + ['phase -F %d'%p]
+ abc('r %s_phase_temp.aig'%f_name)
+ abc('&get;&frames -o -F %d;&scl;&put'%p)
+ else:
+ abc('r %s_phase_temp.aig'%f_name)
+ abc('&get;&frames -o -F %d;&scl;&put'%p)
+ smp_trace = smp_trace + ['phase -F %d'%p]
print 'Simplifying with %d phases: => '%p,
- simplify()
- trim()
+ smp_trace = smp_trace + ['simplify(1)']
+ simplify(1)
+## res = a_trim() #maybe we don't need this because rel_cost uses n_real_inputs
ps()
cost = rel_cost([ni,nl,na])
print 'New relative cost = %f'%(cost)
if cost < -.01:
abc('w %s_phase_temp.aig'%f_name)
if ((n_latches() == 0) or (n_ands() == 0)):
- return
+ return True
if n_phases() == 1: #this bombs out if no latches
- return
+ return False
else:
- try_phase()
- return
+ result = try_phase()
+ return result
elif len(z)>2: #Try the next eligible phase.
abc('r %s_phase_temp.aig'%f_name)
if p == z[2]: #already tried this
- return
+ return False
p = z[2]
print 'Trying phase = %d: => '%p,
abc('phase -F %d'%p)
if no == n_pos(): #nothing happened because p is not mod period
print 'Phase = %d is not compatible'%p
- return
- ps()
+ return False
+ abc('r %s_phase_temp.aig'%f_name)
+ abc('&get;&frames -o -F %d;&scl;&put'%p)
+ smp_trace = smp_trace + ['phase -F %d'%p]
print 'Simplify with %d phases: '%p,
- simplify()
- trim()
- ps()
+ simplify(1)
+## res =a_trim() #maybe we don't need this because rel_cost uses n_real_inputs
cost = rel_cost([ni,nl,na])
print 'New relative cost = %f'%(cost)
if cost < -.01:
@@ -2104,49 +2817,108 @@ def try_phase():
print_circuit_stats()
abc('w %s_phase_temp.aig'%f_name)
if ((n_latches() == 0) or (n_ands() == 0)):
- return
+ return True
if n_phases() == 1: # this bombs out if no latches
- return
+ return True
else:
- try_phase()
- return
+ result = try_phase()
+ return result
+ else:
+ smp_trace = smp_trace + ['de_phase']
abc('r %s_phase_temp.aig'%f_name)
- return
+ return False
def try_temp(t=15):
+ global smp_trace,aigs
btime = time.clock()
- trim()
- print'Trying temporal decomposition - for max %s sec. '%convert(t),
- abc('w %s_best.aig'%f_name)
- ni = n_pis()
+## res = a_trim() #maybe we don't want this here.
+ print 'Trying temporal decomposition - for max %0.2f sec. '%(t),
+ abc('w %s_best_temp.aig'%f_name)
+## ni = n_pis()
+ ni = n_real_inputs()
nl = n_latches()
na = n_ands()
best = [ni,nl,na]
+ cost_best = 0
+ i_best = 0
+ n_done = 0
+ print 'best = ',
+ print best
F = create_funcs([18],t) #create a timer function
-## F = F + [eval('(pyabc_split.defer(abc)("tempor -s; trm; scr; trm; tempor; trm; scr; trm"))')]
- F = F + [eval('(pyabc_split.defer(abc)("tempor -s; &get; &trim -o; &put; scr; &get; &trim -o; &put; tempor; &get; &trim -o; &put; scr; &get; &trim -o; &put"))')]
+ F = F + [eval('(pyabc_split.defer(struc_temp)())')]
+ F = F + [eval('(pyabc_split.defer(full_temp)())')]
+ F = F + [eval('(pyabc_split.defer(two_temp)())')]
for i,res in pyabc_split.abc_split_all(F):
- break
- cost = rel_cost(best)
+## print i,res
+ if i == 0:
+ break
+ if n_latches() == 0:
+ return True
+ n_done = n_done+1
+ cost = rel_cost(best)
+ if cost<0:
+ nri=n_real_inputs()
+ best = (nri,n_latches(),n_ands())
+ abc('w %s_best_temp.aig'%f_name)
+ i_best = i
+ cost_best = cost
+ print 'cost = %.2f, best = '%cost,
+ print best
+## if i == 1:
+## smp_trace = smp_trace + ['tempor -s']
+## if i == 2:
+## smp_trace = smp_trace + ['tempor']
+## if n_latches == 0:
+## break
+ if n_done > 2:
+ break
+## cost = rel_cost(best)
+ cost = cost_best
print 'cost = %0.2f'%cost
- if cost < .01:
+ abc('r %s_best_temp.aig'%f_name)
+## if cost < -.01:
+ if cost<0:
ps()
- return
+ return True
else:
- abc('r %s_best.aig'%f_name)
+## smp_trace = smp_trace + ['de_tempor']
+## abc('r %s_best_temp.aig'%f_name)
+ return False
+
+def struc_temp():
+ abc('tempor -s;scr')
+ result = quick_simp()
+ if result == 'UNSAT':
+ return Unsat
+ elif result == 'SAT':
+ return Sat
+ return Undecided
+
+def full_temp():
+ abc('tempor')
+ return simplify()
-def try_temps(t=15):
+def try_temps(t=20):
+ """ need to modify something to be able to update cex"""
+ global smp_trace
+ abc('w %s_try_temps.aig'%f_name)
best = (n_pis(),n_latches(),n_ands())
+ npi = n_pis()
+ F = 1
while True:
- try_temp(t)
- if ((best == (n_pis(),n_latches(),n_ands())) or n_ands() > .9 * best[2] ):
+ res = try_temp(t)
+ ps()
+ if n_latches() == 0:
break
- elif n_latches() == 0:
+ if res == False:
+ return False,F
+ if ((best == (n_pis(),n_latches(),n_ands())) or n_ands() > .9 * best[2] ):
break
else:
+ smp_trace = smp_trace + ['tempor']
best = (n_pis(),n_latches(),n_ands())
+ return True,n_pis()/npi
-
def rel_cost_t(J):
""" weighted relative costs versus previous stats."""
if (n_latches() == 0 and J[1]>0):
@@ -2159,7 +2931,7 @@ def rel_cost_t(J):
#ri = (float(nri)-float(ni))/float(ni)
rli = (float(n_latches()+nri)-float(nli))/float(nli)
ra = (float(n_ands())-float(na))/float(na)
- cost = 10*rli + .5*ra
+ cost = 10*rli + 1*ra #changed from .5 to 1 on ra
return cost
def rel_cost(J):
@@ -2176,16 +2948,18 @@ def rel_cost(J):
ri = (float(nri)-float(ni))/float(ni)
rl = (float(n_latches())-float(nl))/float(nl)
ra = (float(n_ands())-float(na))/float(na)
- cost = 1*ri + 5*rl + .2*ra
+ cost = 1*ri + 5*rl + .25*ra
## print 'Relative cost = %0.2f'%cost
return cost
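+# Worked example (a sketch with assumed numbers): if the previous stats were
+# ni,nl,na = 100,50,1000 and the current design has nri = 80 real inputs,
+# 40 latches and 900 ands, then ri = -0.2, rl = -0.2, ra = -0.1 and
+# cost = 1*(-0.2) + 5*(-0.2) + .25*(-0.1) = -1.225, i.e. a net improvement.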
-def best_fwrd_min(J):
- global f_name, methods
+def best_fwrd_min(J,t=30):
+ global f_name, methods,smp_trace
+ J=[18]+J
mtds = sublist(methods,J)
- F = create_funcs(J,0)
+ F = create_funcs(J,t)
(m,result) = fork_best(F,mtds) #FORK here
print '%s: '%mtds[m],
+ smp_trace = smp_trace + ['%s'%mtds[m]]
def try_forward():
"""Attempts most forward retiming, and latch correspondence there. If attempt fails to help simplify, then we revert back to the original design
@@ -2198,7 +2972,7 @@ def try_forward():
na = n_ands()
abc('w %s_savetemp0.aig'%f_name)
abc('r %s_savetemp.aig'%f_name)
- abc('dr -m')
+ abc('dretime -m')
abc('lcorr')
abc('dr')
if ((n_latches() <= nl) and (n_ands() < na)):
@@ -2210,6 +2984,13 @@ def try_forward():
return
return
+def qqsimp():
+ abc('&get;&scl;,reparam;&scorr -C 0;&scl;,reparam;&put')
+ shrink()
+ abc('w %ssimp.aig'%f_name)
+ ps()
+
+
def quick_simp():
"""A few quick ways to simplify a problem before more expensive methods are applied.
Uses & commands if problem is large. These commands use the new circuit based SAT solver"""
@@ -2217,21 +2998,22 @@ def quick_simp():
if na < 60000:
abc('scl -m;lcorr;drw')
else:
- abc('&get;&scl;&lcorr;&put;drw')
+ abc('&get;&scl;&lcorr;&put')
+ if n_ands() < 500000:
+ abc('drw')
print 'Using quick simplification',
status = process_status(get_status())
if status <= Unsat:
result = RESULT[status]
else:
ps()
-## write_file('smp')
-#### K_backup = K = K+1
result = 'UNDECIDED'
return result
def scorr_constr():
"""Extracts implicit constraints and uses them in signal correspondence
Constraints that are found are folded back when done"""
+ global aigs
na = max(1,n_ands())
n_pos_before = n_pos()
if ((na > 40000) or n_pos()>1):
@@ -2239,32 +3021,51 @@ def scorr_constr():
abc('w %s_savetemp.aig'%f_name)
na = max(1,n_ands())
## f = 1
- f = 18000/na #**** THIS can create a bug 10/15/11
+ f = 18000/na #**** THIS can create a bug 10/15/11. see below
f = min(f,4)
f = max(1,f)
+ print 'Looking for constraints - ',
if n_ands() > 18000:
cmd = 'unfold -s -F 2'
else:
cmd = 'unfold -F %d -C 5000'%f
abc(cmd)
if n_pos() == n_pos_before:
- print 'No constraints found'
+ print 'none found'
return Undecided_no_reduction
if (n_ands() > na): #no constraints found
abc('r %s_savetemp.aig'%f_name)
return Undecided_no_reduction
na = max(1,n_ands())
- f = 1
- print 'Number of constraints = %d'%((n_pos() - n_pos_before))
+ f = 1 #put here until bug is fixed.
+ print 'found %d constraints'%((n_pos() - n_pos_before))
abc('scorr -c -F %d'%f)
abc('fold')
- trim()
+ res = a_trim()
print 'Constrained simplification: ',
ps()
return Undecided_no_reduction
+def a_trim():
+ """ this is set up to put the aig on the aigs list if trim was successful"""
+## print 'trimming'
+## print 5.1
+ pushed = check_push() #checking if a push is needed and if so do it.
+ #It is not needed if flops match previous aig
+## print 5.2
+ res = trim()
+## print 5.3
+ if res:
+ aigs_pp()
+## aigs.push() #store the aig after rpm if it did something
+ elif pushed: #since trim did not do anything, we don't need the last push done by check push
+ dump() #dump the last aig on the list
+## print 5.4
+ return res
+
def try_scorr_c(f):
""" Trying multiple frames because current version has a bug."""
+ global aigs
set_globals()
abc('unfold -F %d'%f)
abc('scorr -c -F %d'%f)
@@ -2274,7 +3075,7 @@ def try_scorr_c(f):
if is_sat():
return 0
else:
- trim()
+ res = a_trim()
return 1
@@ -2286,7 +3087,7 @@ def input_x_factor():
print 'x_factor set to %f'%x_factor
-def prove(a):
+def prove(a=0,abs_tried = False):
"""Proves all the outputs together. If ever an abstraction
was done then if SAT is returned,
we make RESULT return "undecided".
@@ -2305,6 +3106,8 @@ def prove(a):
result = prove_part_1() #do full simplification here
if ((result == 'SAT') or (result == 'UNSAT')):
return result
+ if n_latches() == 0:
+ return 'UNDECIDED'
if a == 1:
spec_first = True
t_init = 2
@@ -2312,20 +3115,28 @@ def prove(a):
## First phase
if spec_first:
result = prove_part_3() #speculation done here first
+ if result == 'UNDECIDED' and abs_tried and n_pos() <= 2:
+ add_trace('de_speculate')
+ return result
else:
+ abs_tried = True
result = prove_part_2() #abstraction done here first
if ((result == 'SAT') or (result == 'UNSAT')):
return result
## Second phase
if spec_first: #did spec already in first phase
t_init = 2
+ abs_tried = True
result = prove_part_2() #abstraction done here second
if result == 'SAT':
abs_found_cex_after_spec = True
else:
result = prove_part_3() #speculation done here second
if result == 'SAT':
- spec_found_cex_after_abs = True
+ if '_abs' in f_name:
+ spec_found_cex_after_abs = True
+ else:
+ return result
if result == 'UNSAT':
return result
status = get_status()
@@ -2333,6 +3144,7 @@ def prove(a):
status = Error
if ('_abs' in f_name and spec_found_cex_after_abs): #spec file should not have been written in speculate
f_name = revert(f_name,1) #it should be as if we never did abstraction.
+ add_trace('de_abstract')
print 'f_name = %s'%f_name
abc('r %s.aig'%f_name) #restore previous
t_init = 2
@@ -2340,11 +3152,12 @@ def prove(a):
print 'proving speculation first'
write_file('rev') #maybe can get by with just changing f_name
print 'f_name = %s'%f_name
- result = prove(1) #1 here means do smp and then spec
+ result = prove(1,True) #1 here means do smp and then spec
if ((result == 'SAT') or (result == 'UNSAT')):
return result
elif ('_spec' in f_name and abs_found_cex_after_spec): #abs file should not have been written in abstract
f_name = revert(f_name,1) #it should be as if we never did speculation.
+ add_trace('de_speculate')
abc('r %s.aig'%f_name) #restore previous
t_init = 2
if not '_rev' in f_name:
@@ -2357,20 +3170,34 @@ def prove(a):
return 'UNDECIDED'
def prove_part_1():
- global x_factor,xfi,f_name, last_verify_time,K_backup
+ global x_factor,xfi,f_name, last_verify_time,K_backup,aigs
print 'Initial: ',
ps()
x_factor = xfi
- print '\n***Running pre_simp'
set_globals()
if n_latches() > 0:
- status = run_par_simplify()
+## ps()
+ res = try_frames_2()
+ if res:
+ print 'frames_2: ',
+ ps()
+ aigs_pp('push','phase')
+ print '\n***Running pre_simp'
+ add_trace('pre_simp')
+ result = run_par_simplify()
+ status = result[0]
+ method = result[1]
+ if 'scorr' in method:
+ add_trace(method)
else:
+ print '\n***Circuit is combinational, running check_sat'
+ add_trace('comb_check')
status = check_sat()
if ((status <= Unsat) or (n_latches() == 0)):
return RESULT[status]
- trim()
- write_file('smp') #need to check that this was not written in pre_simp
+ res =a_trim()
+ if not '_smp' in f_name:
+ write_file('smp') #need to check that this was not written in pre_simp
set_globals()
return RESULT[status]
@@ -2378,13 +3205,33 @@ def run_par_simplify():
set_globals()
t = 1000
funcs = [eval('(pyabc_split.defer(pre_simp)())')]
- J = pdrs+bmcs+intrps
+ J = [35]+pdrs[:3]+bmcs[:3]+intrps[:1]+sims # 35 is par_scorr
J = modify_methods(J,1)
+## J = J + bestintrps
funcs = create_funcs(J,t)+ funcs #important that pre_simp goes last
mtds =sublist(methods,J) + ['pre_simp']
- fork_last(funcs,mtds)
+ print mtds
+ result = fork_last(funcs,mtds)
status = get_status()
- return status
+ return [status] + [result]
+
+def try_frames_2():
+ abc('scl')
+ nl = n_latches()
+ if n_ands()> 35000:
+ return False
+ abc('w %s_temp.aig'%f_name)
+ abc('&get;&frames -o -F 2;&scl;&put')
+ if n_latches() < .75*nl:
+ print 'frames_2: Number of latches reduced to %d'%n_latches()
+ add_trace('frames_2')
+## res = reparam()
+## xxxxx handle this
+## if res:
+## aigs.push()
+ return True
+ abc('r %s_temp.aig'%f_name)
+ return False
def prove_part_2(ratio=.75):
"""does the abstraction part of prove"""
@@ -2393,30 +3240,30 @@ def prove_part_2(ratio=.75):
## print 'ifbip = %d'%ifbip
status = abstract(ifbip) #ABSTRACTION done here
status = process_status(status)
- print 'abstract done, status is %d'%status
+ print 'abstract done, status is %s'%str(status)
result = RESULT[status]
if status < Unsat:
print 'CEX in frame %d'%cex_frame()
return result #if we found a cex we do not want to trim.
- trim()
return result
def prove_part_3():
"""does the speculation part of prove"""
global x_factor,xfi,f_name, last_verify_time,init_initial_f_name
global max_bmc, sec_options
- if ((n_ands() > 36000) and sec_options == ''):
- sec_options = 'g'
- print 'sec_options set to "g"'
- print '\n***Running speculate'
+## if ((n_ands() > 36000) and sec_options == ''):
+## sec_options = 'g'
+## print 'sec_options set to "g"'
+ print '\n***Running speculate on %s: '%f_name,
+ ps()
+## add_trace('speculate')
status = speculate() #SPECULATION done here
status = process_status(status)
- print 'speculate done, status is %d'%status
+## print 'speculate done, status is %d'%status
result = RESULT[status]
if status < Unsat:
print 'CEX in frame %d'%cex_frame()
return result
- trim() #if cex is found we doo not want to trim.
return result
def prove_all(dir,t):
@@ -2429,7 +3276,7 @@ def prove_all(dir,t):
results = []
f =open('results_%d.txt'%len(dir), 'w')
for name in dir:
- read_file_quiet(name)
+ read_file_quiet_i(name)
print '\n **** %s:'%name,
ps()
F = create_funcs([18,6],t) #create timer function as i = 0 Here is the timer
@@ -2438,13 +3285,13 @@ def prove_all(dir,t):
tt = time.time()
if i == 0:
res = 'Timeout'
- str = '%s: %s, time = %s'%(name,res,convert(tt-xtime))
+ str = '%s: %s, time = %0.2f'%(name,res,(tt-xtime))
if res == 'SAT':
str = str + ', cex_frame = %d'%cex_frame()
str = str +'\n'
f.write(str)
f.flush()
- results = results + ['%s: %s, time = %s'%(name,res,convert(tt-xtime))]
+ results = results + ['%s: %s, time = %0.2f'%(name,res,(tt-xtime))]
xtime = tt
## print results
f.close()
@@ -2468,7 +3315,7 @@ def remove_pos(lst):
if not proved == []:
if ((max(proved)>n_pos()-1) or min(proved)< 0):
print proved
- remove(proved)
+ remove(proved,0)
#functions for proving multiple outputs in parallel
@@ -2629,291 +3476,6 @@ def distribute(N,div):
result = result + [n]
return result
-####def find_cex_par(tt):
-#### """prove n outputs at once and quit at first cex. Otherwise if no cex found return aig
-#### with the unproved outputs"""
-#### global trim_allowed,last_winner, last_cex, n_pos_before, t_init, j_last, sweep_time
-#### b_time = time.time() #Wall clock time
-#### n = n_pos()
-#### l=remove_const_pos()
-#### N = n_pos()
-#### full_time = all_proc = False
-#### print 'Number of POs: %d => %d'%(n,N)
-#### if N == 0:
-#### return Unsat
-###### inc = 5 #******* increment for grouping for sweep set here *************
-###### inc = min(12,max(inc, int(.1*N)))
-#### inc = 1+N/100
-###### if N <1.5*inc: # if near the increment for grouping try to get it below.
-###### prove_all_ind()
-###### N = n_pos()
-#### if inc == 1:
-#### prove_all_ind()
-#### N = n_pos()
-#### T = int(tt) #this is the total time to be taken in final verification run before quitting speculation
-###### if inc == 10:
-###### t_init = 10
-###### t = max(t_init/2,T/20)
-###### if N <= inc:
-###### t = T
-###### print "inc = %d, Sweep time = %s, j_group = %d"%(inc,convert(t),j_last)
-#### t = sweep_time/2 #start sweeping at last time where cex was found.
-###### it used to be t = 1 here but it did not make sense although seemed to work.
-###### inc = 2
-#### while True: #poor man's concurrency
-#### N = n_pos()
-#### if N == 0:
-#### return Unsat
-#### #sweep_time controls so that when sweep starts after a cex, it starts at the last sweep time
-#### t = max(2,2*t) #double sweep time
-#### if t > .75*T:
-#### t = T
-#### full_time = True
-#### if ((N <= inc) or (N < 13)):
-#### t = sweep_time = T
-#### full_time = True
-#### inc = 1
-###### sweep_time = 2*sweep_time
-#### if not t == T:
-#### t= sweep_time = max(t,sweep_time)
-###### t = sweep_time
-######new heuristic
-#### if (all_proc and sweep_time > 8): #stop poor man's concurrency and jump to full time.
-#### t = sweep_time = T
-#### full_time - True #this might be used to stop speculation when t = T and the last sweep
-###### found no cex and we do not prove Unsat on an output
-#### abc('w %s__ysavetemp.aig'%f_name)
-#### ps()
-#### if N < 50:
-#### inc = 1
-#### print "inc = %d, Sweep time = %s, j_last = %d"%(inc,convert(t),j_last)
-#### F = []
-###### G = []
-#### #make new lambda functions since after the last pass some of the functions may have been proved and eliminated.
-#### for i in range(N):
-#### F = F + [eval('(pyabc_split.defer(verify_only)(%d,%s))'%(i,convert(T)))] #make time large and let sleep timer control timeouts
-###### G = G + [range(i,i+1)]
-#### ######
-#### result = []
-#### outcome = ''
-#### N = len(F)
-#### rng = range(1+(N-1)/inc)
-#### rng = rng[j_last:]+rng[:j_last] #pick up in range where last found cex.
-###### print 'rng = ',
-###### print rng
-#### k = -1
-#### bb_time = time.time()
-#### for j in rng:
-#### k = k+1 #keeps track of how many groups we have processed.
-#### j_last = j
-#### J = j*inc
-#### JJ = J+inc
-#### JJ = min(N,JJ)
-#### if J == JJ-1:
-#### print 'Function = %d '%J,
-#### else:
-#### print 'Functions = [%d,%d]'%(J,JJ-1)
-#### Fj = create_funcs([18],t+1) #create timer function as i = 0 Here is the timer
-#### Fj = Fj + F[J:JJ]
-#### count = 0
-#### fj_time = time.time()
-#### abc('r %s__ysavetemp.aig'%f_name) #important need to restore aig here so the F refers to right thing when doing verify_only.
-###### # because verify_only changes the aig.
-###### ps()
-#### for i,res in pyabc_split.abc_split_all(Fj):
-#### count = count+1
-#### Ji = J+i-1 #gives output number
-#### if ((res == 0) or (res == 1)):
-#### abc('read_status %d.status'%Ji)
-#### res = get_status()
-#### outcome = 'CEX: Frame = %d, PO = %d, Time = %s'%(cex_frame(),Ji,convert((time.time() - fj_time)))
-#### break
-#### if i == 0: #sleep timer expired
-#### outcome = '*** Time expired in %s sec. Next group = %d to %d ***'%(convert(time.time() - fj_time),JJ,min(N,JJ+inc))
-#### break
-#### elif res == None: #this should not happen
-#### print res
-#### print Ji,RESULT[res],
-#### else: # output Ji was proved
-#### result = result + [[Ji,res]]
-#### if count >= inc:
-#### outcome = '--- all group processed without cex ---'
-#### all_proc = True
-#### break
-#### continue #this can only happen if inc > 1
-#### # end of for i loop
-#### if ((res < Unsat) and (not res == None)):
-#### break
-#### else:
-#### continue # continue j loop
-#### #end of for j loop
-#### if k < len(rng):
-#### t_init = t/2 #next time start with this time.
-#### else:
-#### j_last = j_last+1 #this was last j and we did not find cex, so start at next group
-#### print outcome + ' => ' ,
-#### if ((res < Unsat) and (not res == None)):
-#### t_init = t/2
-#### abc('read_status %d.status'%Ji) #make sure we got the right status file.
-#### #actually if doing abstraction we could remove proved outputs now, but we do not. -**inefficiency**
-#### return res
-#### else: #This implies that no outputs were disproved. Thus can remove proved outputs.
-#### abc('r %s__ysavetemp.aig'%f_name) #restore original aig
-#### if not result == []:
-#### res = []
-#### for j in range(len(result)):
-#### k = result[j]
-#### if k[1] == 2:
-#### res = res + [k[0]]
-###### print res
-###### result = mapp(res,G)
-#### result = res
-###### print result
-#### remove(result) #remove the outputs that were proved UNSAT.
-#### #This is OK for both abstract and speculate
-#### print 'Number of POs reduced to %d'%n_pos()
-#### if n_pos() == 0:
-#### return Unsat
-#### if t>=T:
-#### return Undecided
-#### else:
-#### continue
-#### return Undecided
-
-####def remap_pos():
-#### """ maintains a map of current outputs to original outputs"""
-#### global po_map
-#### k = j = 0
-#### new = []
-#### assert n_pos() == len(po_map), 'length of po_map, %d, and current # POs, %d, don"t agree'%(len(po_map),n_pos())
-#### for j in range(len(po_map)):
-#### N = n_pos()
-#### abc('removepo -N %d'%k) # this removes the output if it is 0 driven
-#### if n_pos() == N:
-#### new = new + [po_map[j]]
-#### k = k+1
-#### if len(new) < len(po_map):
-###### print 'New map = ',
-###### print new
-#### po_map = new
-
-####def prove_mapped():
-#### """
-#### assumes that srm is in workspace and takes the unsolved outputs and proves
-#### them by using proved outputs as constraints.
-#### """
-#### global po_map
-###### print po_map
-#### po_map.sort() #make sure mapped outputs are in order
-#### for j in po_map: #put unsolved outputs first
-#### run_command('swappos -N %d'%j)
-#### print j
-#### N = n_pos()
-#### assert N > len(po_map), 'n_pos = %d, len(po_map) = %d'%(N, len(po_map))
-#### run_command('constr -N %d'%(N-len(po_map))) #make the other outputs constraints
-#### run_command('fold') #fold constraints into remaining outputs.
-#### ps()
-#### prove_all_mtds(100)
-
-####def mapp(R,G):
-#### result = []
-#### for j in range(len(R)):
-#### result = result + G[R[j]]
-#### return result
-
-#_______________________________________
-
-
-####def prove_g_pos_split():
-#### """like prove_g_pos but quits when any output is undecided"""
-#### global f_name, max_bmc,x_factor,x
-#### x = time.clock()
-#### #input_x_factor()
-#### init_f_name = f_name
-#### print 'Beginning prove_g_pos_split'
-#### prove_all_ind()
-#### print 'Number of outputs reduced to %d by fast induction with constraints'%n_pos()
-#### reparam()
-###### try_rpm()
-#### print '********** Proving each output separately ************'
-#### f_name = init_f_name
-#### abc('w %s_osavetemp.aig'%f_name)
-#### n = n_pos()
-#### print 'Number of outputs = %d'%n
-#### pos_proved = []
-#### J = 0
-#### jnext = n-1
-#### while jnext >= 0:
-#### max_bmc = -1
-#### f_name = init_f_name
-#### abc('r %s_osavetemp.aig'%f_name)
-#### jnext_old = jnext
-#### extract(jnext,jnext)
-#### jnext = jnext -1
-#### print '\nProving output %d'%(jnext_old)
-#### f_name = f_name + '_%d'%jnext_old
-#### result = prove_1()
-#### if result == 'UNSAT':
-#### if jnext_old > jnext+1:
-#### print '******** PROVED OUTPUTS [%d-%d] ******** '%(jnext+1,jnext_old)
-#### else:
-#### print '******** PROVED OUTPUT %d ******** '%(jnext_old)
-#### pos_proved = pos_proved + range(jnext +1,jnext_old+1)
-#### continue
-#### if result == 'SAT':
-#### print 'One of output in (%d to %d) is SAT'%(jnext + 1,jnext_old)
-#### return result
-#### else:
-#### print '******** UNDECIDED on OUTPUTS %d thru %d ******** '%(jnext+1,jnext_old)
-#### print 'Eliminating %d proved outputs'%(len(pos_proved))
-#### # remove outputs proved and return
-#### f_name = init_f_name
-#### abc('r %s_osavetemp.aig'%f_name)
-#### remove(pos_proved)
-#### trim()
-#### write_file('group')
-#### return 'UNDECIDED'
-#### f_name = init_f_name
-#### abc('r %s_osavetemp.aig'%f_name)
-#### if not len(pos_proved) == n:
-#### print 'Eliminating %d proved outputs'%(len(pos_proved))
-#### remove(pos_proved)
-#### trim()
-#### write_file('group')
-#### result = 'UNDECIDED'
-#### else:
-#### print 'Proved all outputs. The problem is proved UNSAT'
-#### result = 'UNSAT'
-#### print 'Total time = %f sec.'%(time.clock() - x)
-#### return result
-
-####def group(a,n):
-#### """Groups together outputs beginning at output n and any contiguous preceeding output
-#### that does not increase the latch support by a or more"""
-#### global f_name, max_bmc
-#### nlt = n_latches()
-#### extract(n,n)
-#### nli = n_latches()
-#### if n == 0:
-#### return n-1
-#### for J in range(1,n+1):
-#### abc('r %s_osavetemp.aig'%f_name)
-#### j = n-J
-#### #print 'Running %d to %d'%(j,n)
-#### extract(j,n)
-#### #print 'n_latches = %d'%n_latches()
-#### #if n_latches() >= nli + (nlt - nli)/2:
-#### if n_latches() == nli:
-#### continue
-#### if n_latches() > nli+a:
-#### break
-#### abc('r %s_osavetemp.aig'%f_name)
-###### if j == 1:
-###### j = j-1
-#### print 'extracting [%d-%d]'%(j,n)
-#### extract(j,n)
-#### ps()
-#### return j-1
def extract(n1,n2):
"""Extracts outputs n1 through n2"""
@@ -2924,97 +3486,200 @@ def extract(n1,n2):
def remove_intrps(J):
global n_proc,ifbip
+## print J
npr = n_proc
if 18 in J:
npr = npr+1
if len(J) <= npr:
+## print J
return J
JJ = []
alli = [23,1,22] # if_no_bip, then this might need to be changed
l = len(J)-npr
- alli = alli[:l]
+ alli = alli[l:]
+## J.reverse() #strip off in reverse order.
for i in J:
- if i in alli:
+ if i in alli:
continue
else:
- JJ = JJ +[i]
+ JJ = JJ+[i]
+## print JJ
return JJ
+
+def restrict(lst,v=0):
+ '''restricts the aig to the POs in the list'''
+ #this assumes that there are no const-1 POs. Warning, this will not remove any const-0 POs
+ N = n_pos()
+ lst1 = lst + [N]
+ r_lst = gaps(lst1) #finds POs not in lst
+ if len(r_lst) == N:
+ return
+ remove(r_lst,v)
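+# Example (sketch, assuming gaps() returns the indices missing from its argument):
+# with 5 POs, restrict([1,3]) computes gaps([1,3,5]) = [0,2,4] and removes those,
+# keeping only POs 1 and 3.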
-def remove(lst):
- """Removes outputs in list"""
+def remove(lst,v=0):
+ """Removes outputs in list
+ WARNING will not remove all POs even if in lst
+ """
global po_map
n_before = n_pos()
- zero(lst)
- l=remove_const_pos()
- print 'n_before = %d, n_list = %d, n_after = %d'%(n_before, len(lst), n_pos())
+ zero(lst,v)
+ l=remove_const_pos(v)
+ assert len(lst) == (n_before - n_pos()),'Inadvertently removed some const-0 POs.\nPO_before = %d, n_removed = %d, PO_after = %d'%(n_before, len(lst), n_pos())
+ print 'PO_before = %d, n_removed = %d, PO_after = %d'%(n_before, len(lst), n_pos())
-def check_pos():
- """ checks if any POs are 0, and removes them with a warning"""
- N = n_pos()
- l=remove_const_pos()
- if not N == n_pos():
- print 'WARNING: some POs are 0 and are rremoved'
- print '%d POs removed'%(N - n_pos())
-def zero(list):
+def zero(list,v=0):
"""Zeros out POs in list"""
+ if v == 0:
+ cmd = 'zeropo -s -N ' #puts const-0 in PO
+ else:
+ cmd = 'zeropo -so -N ' #puts const-1 in PO
for j in list:
- run_command('zeropo -N %d'%j)
+ run_command('%s%d'%(cmd,j)) #-s prevents the strash after every zeropo
+ abc('st')
-def mark_const_pos(ll=[]):
- """ creates an indicator of which PO are const-0 and which are const-1
- does not change number of POs
+def listr_0_pos():
+ """ returns a list of const-0 pos and removes them
+ """
+ L = range(n_pos())
+ L.reverse()
+ ll = []
+ for j in L:
+ i = is_const_po(j)
+ if i == 0:
+ abc('removepo -N %d'%j) #removes const-0 output
+## print 'removed PO %d'%j
+ ll = [j] + ll
+ return ll
+
+def list_0_pos():
+ """ returns a list of const-0 pos and removes them
"""
+ abc('w %s_savetemp.aig'%f_name)
L = range(n_pos())
L.reverse()
- if ll == []:
- ll = [-1]*n_pos()
- ind = ll
- abc('&get') #save original
+ ll = []
for j in L:
- n_before = n_pos()
- abc('removepo -N %d'%j) #removes const-0 output
- if n_pos() < n_before:
- ind[j]=0
-## print sumsize(ind)
-## ps()
- abc('&put')
+ i = is_const_po(j)
+ if i == 0:
+ abc('removepo -N %d'%j) #removes const-0 output
+## print 'removed PO %d'%j
+ ll = [j] + ll
+ abc('r %s_savetemp.aig'%f_name)
+ return ll
+
+def listr_1_pos():
+ """ returns a list of const-1 pos and removes them
+ """
+ L = range(n_pos())
+ L.reverse()
+ ll = []
for j in L:
- n_before = n_pos()
- abc('removepo -z -N %d'%j) # -z removes const-1 output
- if n_pos() < n_before:
- ind[j]=1
-## ps()
- abc('&put') #put back original
-## remove_const_pos()
+ i = is_const_po(j)
+ if i == 1:
+ abc('removepo -z -N %d'%j) #removes const-1 output
+## print n_pos()
+ ll = [j] + ll
+ return ll
+
+
+def mark_const_pos(ll=[]):
+ """ creates an indicator of which PO are const-0 and which are const-1
+ does not change number of POs
+ """
+ n=n_pos()
+ L = range(n)
+ ind = [-1]*n
+ for j in L:
+ ind[j] = is_const_po(j)
print sumsize(ind)
return ind
-def remove_const_pos():
+def remove_const_pos(n=-1):
global po_map
- """removes the 0 pos, but no pis because we might get cexs and need the correct number of pis
- Should keep tract of if original POs are 0 and are removed.
- Can this happen outside of prove_all_ind or
- pord_all which can set proved outputs to 0???
- WARNING: This can remove constant 1 nodes too???
+ """removes the const 0 or 1 pos according to n, but no pis because we might
+ get cexs and need the correct number of pis
"""
- run_command('&get; &trim -i; &put; addpi') #adds a pi only if there are none
- po_map = range(n_pos())
-
-def psp():
- quick_simp()
- result = run_parallel([6,21],500,'US') #runs 'run_parallel' and sp() in parallel.
-## run_parallel uses JP and TERM to terminate.
- return result
-
-def sp():
+ if n > -1:
+ run_command('&get; &trim -i -V %d; &put'%n) #V is 0 or 1
+ else:
+ run_command('&get; &trim -i; &put') #removes both constants
+
+def unmap_cex():
+ """ aig before trim is put in reg-space and after trim in the &space
+ Before and after need to have same number of flops in order o reconcile
+ aigs list should be such that if before and after don't match in number of latches,
+ then some operation changed the flops and we just update the aig with the new number
+ reconcile leaves before aig in reg-space after cex has been updated so cex and aig
+ always match
+ """
+ global aigs,hist
+ print hist
+## while not aigs == []:
+ while not len(hist) == 0:
+ n = n_latches()
+ abc('&get') #save the current aig in &-space
+ print 'Number of PIs in cex = %d'%n_cex_pis()
+ typ = aigs_pp('pop')
+ print typ,
+ ps()
+ if typ == 'phase':
+ typ2 =aigs_pp('pop') #gets the aig before phase
+ abc('phase -c')
+ print 'Number of PIs in cex = %d, Number of frames = %d'%(n_cex_pis(),cex_frame())
+ run_command('testcex -a')
+ hist = hist + [typ2]
+ continue
+ if typ == 'tempor':
+ typ2 = aigs_pp('pop') #gets the aig before tempor
+ abc('tempor -c')
+ print 'Number of PIs in cex = %d, Number of frames = %d'%(n_cex_pis(),cex_frame())
+ run_command('testcex -a')
+ hist = hist + [typ2]
+ continue
+ if typ == 'reparam':
+ nn = n_latches()
+ abc('&get') #put 'after' in &space
+ typ2 = aigs_pp('pop') #get previous to put before in reg-space
+ run_command('reconcile')
+ print 'Number of PIs in cex = %d'%n_cex_pis()
+## reconcile(True) #maps the cex from &-space aig to current aig
+ run_command('testcex -a')
+ if not typ2 == 'reparam0':
+ hist = hist + [typ2] #put back (aig is still there, so we just have to restore hist)
+ continue
+ #else we just leave the aig updated
+ else:
+ assert typ == 'initial','type is not initial'
+ size = str([n_pis(),n_pos(),n_latches(),n_ands()])
+ print 'cex length = %d'%cex_frame()
+ tr = ['cex length = %d'%cex_frame()] + ['cex matches original aig = %s'%size]
+ print 'cex matches original aig'
+ return tr
+## print 'cex matches original aig'
+
+
+def sp(n=0,t=900,check_trace=False):
+ """Alias for super_prove, but also resolves cex to initial aig"""
global initial_f_name
- """Alias for super_prove"""
print 'Executing super_prove'
- result = super_prove(0)
- print '%s is done'%initial_f_name
+ result = super_prove(n,t)
+ print '%s is done and is %s'%(initial_f_name,result[0])
+ print 'sp: ',
+ print result
+ if result[0] == 'SAT' and check_trace:
+ res = unmap_cex()
+ result1 = result[1]+ res
+ result = ['SAT'] + result1
+ report_cex()
+ report_bmc_depth(max(max_bmc,n_bmc_frames()))
return result
+def mp():
+ multi_prove_iter()
+
+def report_cex():
+ abc('write_status %s_cex.status'%init_initial_f_name)
def sumsize(L):
d = count_less(L,0)
@@ -3027,144 +3692,1462 @@ def unmap(L,L2,map):
assert mx <= len(L2),'max of map = %d, length of L2 = %d'%(mx,len(L2))
for j in range(len(map)):
L[j] = L2[map[j]] #expand results of L2 into L
+ return L
+
+def unmap2(L2,map):
+ mx = max(list(map))
+ assert mx <= len(L2),'max of map = %d, length of L2 = %d'%(mx,len(L2))
+ L=[-1]*len(map)
+ for j in range(len(map)):
+ L[j] = L2[map[j]] #expand results of L2 into L
return L
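+# Usage sketch (values assumed): with per-class results L2 = [0,1] and
+# map = [0,1,0,1] mapping each original PO to its class,
+## unmap2([0,1],[0,1,0,1])   # -> [0,1,0,1]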
def create_map(L,N):
- map = [-1]*N
+ """ map equivalence classes into their representative."""
+## print L
+ mapp = [-1]*N
+ m = -1
+ error = False
for j in range(len(L)):
lj = L[j]
for k in range(len(lj)):
- map[lj[k]] = j
- return map
+ mapp[lj[k]] = j
+## print lj
+ mm = min(lj)
+## print mm
+ if not mm == lj[0]: #check if rep is not first on list
+ print 'ERROR: rep is not first, mm = %d, lj[0] = %d'%(mm,lj[0])
+ error = True
+ if mm <= m: #check if iso_classes are in increasing order of representatives.
+ print 'ERROR: in iso map mm < m, mm = %d, m = %d'%(mm,m)
+ error = True
+ m = mm
+ assert not error,'ERROR'
+ return mapp
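+# Usage sketch (classes assumed): for the equivalence classes [[0,2],[1,3]] over
+# N = 4 POs (the first element of each class is its representative),
+## create_map([[0,2],[1,3]],4)   # -> [0,1,0,1]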
+
+
+def weave(L1,lst0,lst1):
+ """ interleave values of L1 and with 1's in positions given in lst1,
+ and 0's in lst0. It is assumed that these lists are in num order..
+ Final list has len = len(L1)+len(lst0)+len(lst1)"""
+ L = [-1]*(len(L1)+len(lst0)+len(lst1))
+## print len(L)
+ if lst0 == []:
+ if lst1 == []:
+ return L1
+ lst = lst1
+ v = 1
+ if lst1 == []:
+ lst = lst0
+ v = 0
+ l = k = 0
+ for j in range(len(L)):
+## print L
+ if j == lst[l]:
+ L[j] = v
+ if l+1 < len(lst):
+ l = l+1
+ else: #put in value in L1
+ L[j] = L1[k]
+ if k+1 < len(L1):
+ k = k+1
+ return L
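+# Usage sketches (lists assumed): re-inserting const-0 results at positions 1,3
+# into the partial result [7,8,9], and SAT results at positions 0,2 into [7,8]:
+## weave([7,8,9],[1,3],[])   # -> [7,0,8,0,9]
+## weave([7,8],[],[0,2])     # -> [1,7,1,8]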
-def mp(op='sp'):
- L = multi_prove(op,900)
- return sumsize(L)
+def quick_mp(t):
+ t1 =time.time()
+ l1 = list_0_pos()
+ S,l2,s = par_multi_sat(t)
+ l2 = indices(s,1)
+ remove(l2,1)
+ abc('scl')
+ simple()
+ ps()
+ print 'time = %0.2f'%(time.time() - t1)
-def multi_prove(op='sp',tt=900):
+def indices(s,v):
+ """return in indices of s that have value v"""
+ L = []
+ for i in range(len(s)):
+ if s[i]== v:
+ L = L+[i]
+ return L
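+# e.g. (sketch): indices([1,0,-1,1],1)   # -> [0,3], the positions marked SAT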
+
+def expand(s,ind,v):
+ """expand s by inserting value v at indices ind"""
+ N = len(s)+len(ind)
+ ind1 = ind+[N]
+ g = gaps(ind1)
+ ss = [-1]*N
+ for i in ind:
+ ss[i] = v
+ j = 0
+ for i in g: #put original values in ss
+ ss[i] = s[j]
+ j = j+1
+ for j in ind:
+ assert ss[j] == v, 'ss = %s, ind = %s'%(str(ss),str(ind))
+ return ss
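+# Usage sketch (assumes gaps() returns the indices missing from its argument):
+# re-inserting UNSAT (0) entries at positions 1 and 3 of the status list [1,-1]:
+## expand([1,-1],[1,3],0)   # -> [1,0,-1,0]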
+
+def remove_v(ss,v):
+ s = []
+ for i in range(len(ss)):
+ if ss[i] == v:
+ continue
+ else:
+ s = s + [ss[i]]
+ return s
+
+def multi_prove(op='simple',tt=900,n_fast=0, final_map=[]):
"""two phase prove process for multiple output functions"""
global max_bmc, init_initial_f_name, initial_f_name,win_list, last_verify_time
- global f_name_save,name_save
+ global f_name_save,nam_save,_L_last,file_out
x_init = time.time()
+ abc('&get;&scl;,reparam -aig=%s_rpm.aig; r %s_rpm.aig')
+ print 'Initial after &scl and reparam = ',
+ ps()
+ abc('w %s_initial_save.aig'%init_initial_f_name)
+ #handle single output case differently
+ _L_last = [-1]*n_pos()
+ if n_pos() == 1:
+ result = sp(2000)
+## abc('w %s_unsolved.aig'%init_initial_f_name)
+ rs=result[0]
+ if rs == 'SAT':
+ report_result(0,1)
+ L = [1]
+ elif rs == 'UNSAT':
+ report_result(0,0)
+ L = [0]
+ elif rs == 'UNDECIDED':
+ report_result(0,-1)
+ L = [-1]
+ else: #error
+ L = [2]
+ res = sumsize(L)
+ rr = '\n@@@@ Time = %d '%(time.time() - t_iter_start) + res
+ print rr
+ file_out.write(rr+ '\n')
+ file_out.flush()
+ return L
+## print 'Removing const-0 POs'
+ NNN = n_pos()
+ lst0 = listr_0_pos() #remove const-0 POs and record
+## print lst0
+ lst0.sort()
N = n_pos()
L = [-1]*N
- print 'Mapping for isomorphism: '
- iso() #reduces number of POs
- map = create_map(iso_eq_classes(),N) #creates map into original
+ print 'Removed %d const-0 POs'%len(lst0)
+ res = 'SAT = 0, UNSAT = %d, UNDECIDED = %d'%(len(lst0),N)
+ rr = '\n@@@@ Time = %.2f: '%(time.time() - t_iter_start)
+ report_L(lst0,0) ##########
+ rr = rr + res
+ print rr
+ file_out.write(rr + '\n')
+ file_out.flush()
+ ttt = n_ands()/1000
+ if ttt < 10:
+ ttt=10
+ elif ttt<20:
+ ttt = 20
+ elif ttt< 30:
+ ttt = 30
+ else:
+ ttt = 50
+ S,lst1,s = par_multi_sat(ttt,1,1,1) #run engines in parallel looking for SAT outputs
+ lst1 = indices(s,1)
+## print S,lst1
+ #put 0 values into lst0
+ lst10 = indices(s,0) #new unsat POs in local indices (with lst0 removed)
+ if not lst10 == []:
+ print 'lst10 = %s'%str(lst10)
+ lst11 = indices(s,1) #local variables
+ ss = expand(s,lst0,0) #ss will reflect original indices
+ report_s(ss)
+ lst0_old = lst0
+ lst0 = indices(ss,0) #additional unsat POs added to initial lst0 (in original indices)
+ print 'lst0 = %s'%str(lst0)
+ assert len(lst0) == len(lst0_old)+len(lst10), 'lst0 = %s, lst0_old = %s, lst10 = %s'%(str(lst0),str(lst0_old),str(lst10))
+ sss = remove_v(ss,0) #remove the 0's from ss
+ assert len(sss) == len(ss)-len(lst0), 'len(sss) = %d, len(ss) = %d, len(lst0) = %d'%(len(sss),len(ss),len(lst0))
+ lst1_1 = indices(sss,1) #The sats now reflect the new local indices.
+ #It makes it as if the newly found unsat POs were removed initially
+ #done with new code
+ assert len(lst1_1) == len(lst1), 'mismatch, lst1 = %d, lst1_1 = %d'%(len(lst1),len(lst1_1))
+ lst1 = lst1_1 #lst1 should be in original minus lst0
+ print 'Found %d SAT POs'%len(lst1)
+ print 'Found %d UNSAT POs'%len(lst10)
+ res = 'SAT = %d, UNSAT = %d, UNDECIDED = %d'%(len(lst1),len(lst0),NNN-(len(lst1)+len(lst0)))
+ rr = '\n@@@@ Time = %.2f: '%(time.time() - t_iter_start)
+ rr = rr + res
+ print rr
+ file_out.write(rr + '\n')
+ file_out.flush()
+ N = n_pos()
+ print len(lst10),n_pos()
+ if not len(lst10) == n_pos() and len(lst10) > 0:
+ remove(lst10,1) #remove 0 POs
+ print 'Removed %d UNSAT POs'%len(lst10)
+ N = n_pos()
+ elif len(lst10) == n_pos():
+ N = 0 #can't remove all POs. Must proceed as if there are no POs. But all POs are UNSAT.
+ print len(lst1),N,S #note: have not removed the lst1 POs.
+ if len(lst1) == N or S == 'UNSAT' or N == 0: #all POs are SAT
+ L = [0]*N #could just put L as all 1's. If N = 0, all POs are UNSAT and lst1 = []
+ for i in range(len(lst1)): #put 1's in SAT POs
+ L[lst1[i]]=1
+ L = weave(L,lst0,[]) #expand L, and put back 0 in L.
+ report_results(L)
+ print sumsize(L)
+ print 'Time = %.2f'%(time.time() - x_init)
+ return L
+## print 'removing them'
+ if not len(lst1)== n_pos():
+ remove(lst1,1) #here we removed all POs in lst1 (local indices)
+ abc('&get;&scl;&put')
+ ## lst1 = bmcj_ss_r(2) #find easy cex outputs
+ ## write_file('bmc1')
+ print 'Removed %d SAT POs'%len(lst1)
+ N = n_pos()
+ else:
+ N = 0
+ if N == 1: #this was wrong. Need to report in original indices???
+ result = sp(2000)
+ rs=result[0]
+ #need to find out original index of remaining PO
+ if rs == 'SAT':
+ v = 1
+ elif rs == 'UNSAT':
+ v = 0
+ elif rs == 'UNDECIDED':
+ v = -1
+ else: #error should not happen, but be conservative
+ v = -1
+ L = [v]*N
+ L = weave(list(L),[],lst1) #put back 1 in L
+ L = weave(list(L),lst0,[]) #put back 0 in L
+ report_results(L)
+ res = sumsize(L)
+ rr = '\n@@@@ Time = %d '%(time.time() - t_iter_start) + res
+ print rr
+ file_out.write(rr+ '\n')
+ file_out.flush()
+ return L
+ L1 =L = [-1]*N
+ if N > 1 and N < 10000 and n_ands() < 500000: #keeps iso in
+## if N > 1 and N < 10000 and False: #temporarily disable iso
+ print 'Mapping for first isomorphism: '
+ res = iso() #reduces number of POs
+ if res == True:
+ abc('&get;&scl;&put')
+ write_file('iso1')
+ leq = eq_classes()
+## print leq
+ map1 = create_map(leq,N) #creates map into original
+## print map1
+ if not count_less(L,0) == N:
+ print L
+ L1 = [-1]*n_pos()
+## L1 = pass_down(L,list(L1),map1) # no need to pass down because L will have no values at this point.
+ else:
+ map1 =range(N)
+ else:
+ map1 = range(N)
N = n_pos()
+## print 4
r = pre_simp() #pre_simp
- write_file('smp')
- NP = n_pos()/N #if NP > 1 then NP unrollings were done.
+ write_file('smp1')
+ NP = n_pos()/N #if NP > 1 then NP unrollings were done in pre_simp.
+ if NP > 1:
+ L1 = duplicate_values(L1,NP) # L1 has only -1s here. Put in same values for iso POs
if n_pos() > N:
assert NP>=2, 'NP not 2, n_pos = %d, N = %d, NP = %d'%(n_pos(),N,NP)
print 'pre_simp done. NP = %d\n\n'%NP
#WARNING: if phase abstraction done, then number of POs changed.
- if r == Unsat:
- L = [0]*N #all outputs are UNSAT
+ if r[0] == Unsat:
+ print 'example is UNSAT'
+ L1 = [0]*N #all outputs are UNSAT
+ print sumsize(L1)
+ print 'unmapping for iso'
+ L = unmap(list(L),L1,map1)
+ print "putting in easy cex's and easy unsat's"
+ L = weave(list(L),[],lst1) #put back 1 in L
+ L = weave(list(L),lst0,[]) #put back 0 in L
print sumsize(L)
+ print 'Time = %.2f'%(time.time() - x_init)
+ report_results(L)
return L
f_name_save = f_name
- name_save = '%s_initial_save.aig'%f_name
- abc('w %s'%name_save)
- L1 = [-1]*n_pos() # L1 has length after pre_simp
-## L1= mark_const_pos(L1) #puts const values in L1
-## print sumsize(L1)
+ nam_save = '%s_initial_save.aig'%f_name
#########do second iso here
N = n_pos()
- print 'Mapping for isomorphism: '
- iso() #second iso - changes number of POs
- map2 = create_map(iso_eq_classes(),N) #creates map into previous
- L2 = [-1]*n_pos()
- L2 = mark_const_pos(L2) #populates L2 with constants
- print sumsize(L2)
- #########second iso done
- abc('r %s'%name_save)
- L2 = mprove(L2,op,10) #populates L2 with results
- print sumsize(L2)
+ if N == 1:
+ map2 = [0]
+ L2=[-1]
+## write_file('1')
+## L = output(list(L),list(L1),L2,map1,map2,lst0,lst1,NP)
+ L = output2(list(L2),map1,map2,lst0,lst1,NP)
+ result = simple(2000,1)
+ Ss = rs = result[0]
+ if rs == 'SAT':
+ L2 = [1]
+ if rs == 'UNSAT':
+ L2 = [0]
+ else:
+## if False and N < 10000: #temp disable iso
+ if N < 10000 and n_ands() < 500000:
+ print 'Mapping for second isomorphism: '
+ res = iso() #second iso - changes number of POs
+ if res == True:
+ abc('&get;&scl;&put')
+ map2 = create_map(eq_classes(),N) #creates map into previous
+ else:
+ map2 = range(n_pos())
+ else:
+ map2 = range(n_pos())
+ write_file('iso2')
+ print 'entering par_multi_sat'
+ S,lbmc,s = par_multi_sat(2*ttt,1,1,1) #look for SAT POs
+ lmbc = indices(s,1)
+ print 'par_multi_sat ended'
+ if len(lmbc)>0:
+ print 'found %d SAT POs'%len(lmbc)
+ L2 = s
+## #first mprove for 10-20 sec.
+ ps()
+ print 'Before first mprove2, L2 = %s'%sumsize(L2)
+ DL = output2(list(L2),map1,map2,lst0,lst1,NP) #reporting intermediate results
+## DDL = output3(range(len(L2)),map1,map2,lst0,lst1,NP)
+## print 'DDL = %s'%str(DDL)
+ if n_fast == 1:
+ abc('w %s_unsolved.aig'%init_initial_f_name)
+ return DL
+ NN=n_ands()
+ #create timeout time for first mprove2
+ ttt = 10
+ if NN >30000:
+ ttt = 15
+ if NN > 50000:
+ ttt = 20
+ abc('w %s_before_mprove2.aig'%f_name)
+ print '%s_before_mprove2.aig written'%f_name
+ print 'L2 = %s'%str(L2)
+ print 'Entering first mprove2 for %d sec.'%ttt,
+ Ss,L2 = mprove2(list(L2),op,ttt,1) #populates L2 with results
+## print Ss,L2
+ if Ss == 'SAT':
+ print 'At least one PO is SAT'
+ if Ss == 'ALL_SOLVED':
+ if count_less(L2,0)>0:
+ print 'ERROR'
+## L = output(list(L),list(L1),L2,map1,map2,lst0,lst1,NP) # final report of results.
+ L = output2(list(L2),map1,map2,lst0,lst1,NP)
+ return L
+ print 'After first mprove2: %s'%sumsize(L2)
time_left = tt - (time.time()-x_init)
- print '\n\n\n*********time left = %d ********\n\n\n'%time_left
N = count_less(L2,0)
- if N > 0:
+ if N > 0 and n_fast == 0:
+## output(list(L),list(L1),L2,map1,map2,lst0,lst1,NP) #reporting new intermediate results
+ L = output2(list(L2),map1,map2,lst0,lst1,NP)
t = max(100,time_left/N)
- L2 = mprove(L2,op,t) #populates L2 with more results
+ t_all = 100
S = sumsize(L2)
T = '%.2f'%(time.time() - x_init)
print '%s in time = %s'%(S,T)
- ########undo second iso
- L1 = unmap(L1,L2,map2)
- print 'unmapping for iso: ',
- print sumsize(L1)
- #############
+ abc('w %s_unsolved.aig'%init_initial_f_name)
+ N = n_pos()
+ ttime = 100
+ J = slps+intrps+pdrs+bmcs+sims
+ #do each output for ttime sec.
+ Nn = count_less(L2,0)
+## assert N == len(L2),'n_pos() = %d, len(L2) = %d'%(N,len(L2))
+ if Nn > 0:
+ found_sat = 0
+ print 'final_all = %d, Ss = %s'%(final_all,str(Ss))
+ if final_all and not Ss == 'SAT':
+ print 'Trying to prove all %d remaining POs at once with super_prove'%Nn
+ remove_proved_pos(L2)
+ result = super_prove()
+ if result[0] == 'UNSAT': #all remaining POs are UNSAT
+ for i in range(len(L2)):
+ if L2[i] < 0:
+ L2[i] = 0
+## L = output(list(L),list(L1),L2,map1,map2,lst0,lst1,NP) # final report of results.
+ L = output2(list(L2),map1,map2,lst0,lst1,NP)
+ return L
+ if result[0] == 'SAT':
+ found_sat = 1
+ if found_sat or not final_all or Ss == 'SAT':
+ print 'Trying each remaining PO for %d sec.'%ttime
+ found_sat = 0
+## ttime = 10
+ for i in range(N):
+ if L2[i] > -1:
+ continue
+ print '\n**** cone %d ****'%i
+ abc('r %s_unsolved.aig'%init_initial_f_name)
+ abc('cone -s -O %d'%i)
+ abc('&get;&scl;&lcorr;&put')
+ result = verify(J,ttime)
+ r = result[0]
+ if r > 2:
+ continue
+ elif r == 2:
+ L2[i] = 0
+ else:
+ L2[i] = 1
+ found_sat = 1
+## output(list(L),list(L1),L2,map1,map2,lst0,lst1,NP)
+ L = output2(list(L2),map1,map2,lst0,lst1,NP)
+ if Ss == 'SAT' and found_sat: #previous solve_all was SAT and found at least 1 PO SAT
+ abc('r %s_unsolved.aig'%init_initial_f_name)
+ if not count_less(L2,0) == 0:
+ remove_proved_pos(L2)
+ simplify()
+ write_file('save')
+ result = simple(2000,1)
+ if_found = False
+ if result[0] == 'UNSAT':
+ for i in range(N):
+ if L2[i] == -1:
+ L2[i] = 0
+ elif result[0] == 'SAT' and n_pos() == 1:
+ for i in range(N):
+ if L2[i] == -1:
+ if if_found == True:
+ print 'Error: more than 1 UNDECIDED remained in L2'
+ break
+ L2[i] = 1
+ if_found = True
+ else:
+ if result[0] == 'SAT':
+ print 'at least 1 unsolved PO is SAT'
+## L = output(list(L),list(L1),L2,map1,map2,lst0,lst1,NP) # final report of results.
+ L = output2(list(L2),map1,map2,lst0,lst1,NP)
+ return L
+
+def create_unsolved(L):
+ abc('r %s_initial_save.aig'%init_initial_f_name)
+ lst = []
+ assert len(L) == n_pos(),'lengths of L and n_pos = %d,%d'%(len(L),n_pos())
+ for i in range(len(L)):
+ if L[i] > -1: #solved PO
+ lst = lst + [i]
+ assert max(lst) < n_pos(), 'error in lengths'
+ assert count_less(L,0) == n_pos() - len(lst),'mismatch'
+ remove(lst,-1) # remove solved
+
+def multi_prove_iter():
+ global t_iter_start,file_out,ff_name
+ ff_name = init_initial_f_name
+ file_out = open('%s_time_results.txt'%init_initial_f_name, 'w') #
+ t_iter_start = time.time()
+ L = multi_prove()
+ d = count_less(L,0)
+ u = count_less(L,1)-d
+ s = count_less(L,2) - (d+u)
+ rr = '\n@@@@@ %s: Final time = %.2f '%(init_initial_f_name,(time.time() - t_iter_start))
+ rr = rr + 'SAT = %d, UNSAT = %d, UNDECIDED = %d '%(s,u,d)
+ print rr
+ file_out.write(rr)
+ res = PO_results(L)
+ file_out.write(res)
+## print res
+ file_out.flush()
+ file_out.close()
+ #at this point could restrict to SAT(UNSAT) POs and invoke solver to verify all POs are SAT(UNSAT)
+ return
+
+def restrict_v(L,v):
+ """ L is a list of 0,1,-1"""
+ lst = []
+ for j in range(len(L)):
+ if L[j] == v:
+ lst = lst + [j]
+ restrict(lst)
+ return lst
+
+def PO_results(L):
+ global ff_name
+ S=U=UD=[]
+ for j in range(len(L)):
+ ll = L[j]
+ if ll == -1:
+ UD = UD + [j]
+ elif ll == 0:
+ U = U + [j]
+ elif ll == 1:
+ S = S + [j]
+ else:
+ print 'error: L contains a value other than -1,0,1'
+ res = "[[SAT = %s], [UNSAT = %s], [UNDECIDED = %s]"%(str(S),str(U),str(UD))
+ #restore initial unsolved POs
+ abc('r %s.aig'%ff_name)
+ if not UD == []:
+ restrict(UD,0)
+ abc('w %s_UNSOLVED.aig'%ff_name)
+ print 'Unsolved POs restored as %s_UNSOLVED.aig'%ff_name
+ else:
+ print 'All POs were solved'
+ abc('r %s.aig'%ff_name) #what if original had constraints.
+ abc('fold')
+ if not U == []:
+ restrict(U,1) #we use 1 here because do not want to remove const-0 POs which should be in U
+ abc('w %s_UNSAT.aig'%ff_name)
+ print 'Unsat POs restored as %s_UNSAT.aig'%ff_name
+ abc('r %s.aig'%ff_name)
+ abc('fold')
+ if not S == []:
+ restrict(S,0)
+ abc('w %s_SAT.aig'%ff_name)
+ print 'Sat POs restored as %s_SAT.aig'%ff_name
+ return res
+
+def syn3():
+ t = time.clock()
+ run_command('&get;&b; &jf -K 6; &b; &jf -K 4; &b;&put')
+ ps()
+ print 'time = %.2f'%(time.clock() - t)
+
+def syn4():
+ t = time.clock()
+ abc('&get;&b; &jf -K 7; &fx; &b; &jf -K 5; &fx; &b;&put')
+ ps()
+ print 'time = %.2f'%(time.clock() - t)
+
+
+def solve_parts(n):
+ global t_iter_start,file_out
+ r=range(n)
+ r.reverse()
+ name = init_initial_f_name
+ results = []
+ for i in r:
+ file_out.write('\n@@@@ Starting part%d: \n'%i)
+ file_out.flush()
+ abc('r %s_part%d.aig'%(name,i))
+ print '\nPart%d: '%i
+ L = multi_prove()
+ rr = '\n@@@@ Time = %.2f '%(time.time() - t_iter_start)
+ rr = rr + 'Part%d: '%i
+ ssl = sumsize(L)
+ rr = rr + ssl
+ results = results + [[ssl]]
+ print rr
+ file_out.write(rr + '\n')
+ file_out.flush()
+ return results
+
+def cp(n=10):
+ return chop_prove(n)
+
+def chop_prove(n=10,t=100):
+ global t_iter_start,file_out
+ tm = time.time()
+ abc('w %s_chop_temp.aig'%f_name)
+ N = max(5,n_pos()/n)
+ J = 0
+ total = []
+ np = n_pos()
+ while J < np:
+ abc('r %s_chop_temp.aig'%f_name)
+ E = J+N-1
+ R = N
+ if E > np-1:
+ R = N - (E - (np -1))
+ abc('cone -s -O %d -R %d'%(J,R))
+ npp = n_pos()
+ print '\n\n***** solving outputs %d to %d *****'%(J,(J+R-1))
+ f_map = str([J]*R + range(R))
+ funcs = create_funcs(slps,t)
+ funcs = funcs + [eval('(pyabc_split.defer(mp)(simple,%s,1,%s))'%(t,f_map))] #1 means do fast mp
+## funcs = funcs + [eval('(pyabc_split.defer(sp)())')]
+ for i,res in pyabc_split.abc_split_all(funcs):
+ print 'Method %d returned first with result = %s'%(i,res)
+ if i == 0:
+ res = 'SAT = 0, UNSAT = 0, UNDECIDED = %d'%npp
+ rr = '\n@@@@ Time = %.2f: '%(time.time() - t_iter_start)
+ rr = rr + 'chop%d: '%i
+ rr = rr + res
+ print rr
+ file_out.write(rr + '\n')
+ file_out.flush()
+ break
+ if i == 1:
+ rr = '\n@@@@ Time = %.2f: '%(time.time() - t_iter_start)
+ rr = rr + 'chop%d: '%i
+ rr = rr + res
+ file_out.write(rr + '\n')
+ file_out.flush()
+## print res
+ break
+ else:
+ if res == 'UNSAT':
+ res = 'SAT = 0, UNSAT = %d, UNDECIDED = 0'%npp
+ rr = '\n@@@@ Time = %.2f: '%(time.time() - t_iter_start)
+ rr = rr + 'chop%d: '%i
+ rr = rr + res
+ print rr
+ file_out.write(rr + '\n')
+ file_out.flush()
+ break
+ else:
+ res = 'SAT = 0, UNSAT = 0, UNDECIDED = %d'%npp
+ rr = '\n@@@@ Time = %.2f: '%(time.time() - t_iter_start)
+ rr = rr + 'chop%d: '%i
+ rr = rr + res
+ print rr
+ file_out.write(rr + '\n')
+ file_out.flush()
+ break
+## print res
+ total = total + [[res]]
+ print total
+ J = J + R
+ c = get_counts(total)
+ tm = time.time() - tm
+ rr = '\n@@@@ Total time for chop = %.2f, SAT = %d, UNSAT = %d, UNDECIDED = %d'%(tm,c[0],c[1],c[2])
+ file_out.write(rr + '\n')
+ file_out.flush()
+ print rr
+ return total
+
+def get_counts(L):
+ s=u=d=0
+ for i in range(len(L)):
+ li = L[i][0]
+## print li
+ j1=li.find('=')
+ j2 = li.find(',')
+ num = int(li[j1+1:j2])
+ s = s+num
+ li = li[j2+1:]
+ j1=li.find('=')
+ j2 = li.find(',')
+ num = int(li[j1+1:j2])
+ u = u+num
+ li = li[j2+1:]
+ j1=li.find('=')
+ j2 = li.find(',')
+ num = int(li[j1+1:])
+ d = d+num
+ return [s,u,d]
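+# Usage sketch (result strings assumed):
+## get_counts([['SAT = 1, UNSAT = 2, UNDECIDED = 3'],
+##             ['SAT = 0, UNSAT = 4, UNDECIDED = 1']])   # -> [1,6,4]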
+
+
+def output(L,L1,L2,map1,map2,lst0,lst1,NP,final_map=[]):
+ global t_iter_start
+ print_all(L,L1,L2,map1,map2,lst0,lst1,NP,final_map=[])
+ #print 'L = %s, L1 = %s, L2 = %s'%(sumsize(L),sumsize(L1),sumsize(L2))
+ L1 = unmap(list(L1),L2,map2)
+ print 'L1 after map2 = %s'%sumsize(L1)
if NP > 1: #an unrolling was done
- L1 = check_L(NP,L1) #map into reduced size before unrolling was done.
- print 'unmapping for unrolling.',
- print sumsize(L1)
- L = unmap(L,L1,map)
- print 'unmapping for iso',
+ L1 = check_and_trim_L(NP,list(L1))#map into reduced size before unrolling was done by phase.
+ print 'L1 = %s'%sumsize(L1)
+ L = unmap(list(L),L1,map1)
+ print 'L after map1 = %s'%sumsize(L)
+ L = weave(list(L),[],lst1) #put back 1 in L
+ print 'L after lst1 = %s'%sumsize(L)
+ L = weave(list(L),lst0,[]) #put back 0 in L
+ print 'L after lst0= %s'%sumsize(L)
+ report_results(list(L),final_map)
+ return L
+
+def output2(L2,map1,map2,lst0,lst1,NP,final_map=[]):
+ global t_iter_start
+## print_all(L,L1,L2,map1,map2,lst0,lst1,NP,final_map=[])
+ #print 'L = %s, L1 = %s, L2 = %s'%(sumsize(L),sumsize(L1),sumsize(L2))
+ L1 = unmap2(L2,map2)
+ print 'L1 after map2 = %s'%sumsize(L1)
+## if NP > 1: #an unrolling was done
+## L1 = check_and_trim_L(NP,list(L1))#map into reduced size before unrolling was done by phase.
+## print 'L1 = %s'%sumsize(L1)
+ L = unmap2(L1,map1)
+ print 'L after map1 = %s'%sumsize(L)
+ L = weave(list(L),[],lst1) #put back 1 in L
+ print 'L after lst1 = %s'%sumsize(L)
+ L = weave(list(L),lst0,[]) #put back 0 in L
+ print 'L after lst0= %s'%sumsize(L)
+ report_results(list(L),final_map)
+ return L
+
+def output3(L2,map1,map2,lst0,lst1,NP,final_map=[]):
+ """ find out where results came from"""
+ global t_iter_start
+ L1 = unmap2(L2,map2)
+ L = unmap2(L1,map1)
+ L = weave(list(L),[],lst1) #put back 1 in L
+ L = weave(list(L),lst0,[]) #put back 0 in L
+ return L
+
+def print_all(L,L1,L2,map1,map2,lst0,lst1,NP,final_map=[]):
+## return
+ print 'L = ',
+ print L
+ print 'L1 = ',
+ print L1
+ print 'L2 = ',
+ print L2
+ print 'map1 = ',
+ print map1
+ print 'map2 = ',
+ print map2
+ print 'lst0 = ',
+ print lst0
+ print 'lst1 = ',
+ print lst1
+
+
+def rnge(n,m):
+ """ return interval n+range(m)"""
+ N = []
+ for j in range(m):
+ N = N + [n + j]
+ return N
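+# e.g. (sketch): rnge(3,4)   # -> [3,4,5,6], the interval of length 4 starting at 3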
+
+def create_cluster(n=0,p=1,L=100):
+ """n is the start node and p is the multiplier on the # of POs to extract
+ ll is the limit on the number of latches to include"""
+ clstr=rem = [] #make a list of nodes to remove because not compatible
+ N = 0 #number of end skips
+ init = False
+ skip=0 #number of initial skips
+ abc('w temp.aig')
+ np = n_pos()
+ for i in range(np):
+ if n + p*(i+1-skip) > np:
+ if n_latches() > L:
+ bp = n_pos()-p
+ remove(rnge(bp,p),1) #remove last p
+ abc('scl')
+ return clstr
+ abc('r temp.aig')
+ abc('cone -s -O %d -R %d;scl'%(n,p*(i+1-skip)))
+ xx = n_pos()
+ if n_latches() > L:
+ if not init: #have not found start point yet
+ n=n+p #increase start point
+ print 'n,FF = %d,%d'%(n,n_latches())
+ skip = skip + 1
+ continue
+ else:
+ if not init:
+## nn=p*(i-skip)
+## clstr = clstr + rnge(nn,p*(i+1-skip))
+## print clstr #initial cluster
+ init = True
+## abc('w old.aig')
+ remove(rem,1)
+ abc('scl')
+ ps()
+ if n_latches() > L:
+ x = xx - p #remove last p POs
+ rem = rem + rnge(x,p)
+## print len(rem)
+ print 'x,len(rem) = %d,%d'%(x,len(rem))
+ N = N+1
+ else:
+ bn=p*(i-skip)
+ nr=rnge(bn,p)
+ clstr = clstr + nr
+ if N > 100: #don't do more than 100 end-skips
+ bp = n_pos()-p
+ remove(rnge(bp,p),1) #put last p on remove list
+ abc('scl')
+ return clstr
+
+def generate_clusters(b=0,inc=10,end=100):
+ abc('w temp_gen_clstr.aig')
+ abc('w t_gen_cl.aig')
+ clusters = []
+ while True:
+ abc('r t_gen_cl.aig')
+ clstr = create_cluster(b,inc,end)
+ clusters = clusters + [clstr]
+ abc('r t_gen_cl.aig')
+ if clstr == []:
+ return clusters
+ remove(clstr,1)
+ abc('w t_gen_cl.aig')
+
+def map_clusters_to_original(cl):
+ L = range(n_pos())
+ Clstrs = []
+ k = 0
+ for j in range(len(cl)):
+ c = cl[j]
+ cc = pick(L,c)
+ Clstrs = Clstrs + [cc]
+ L = pick_not(L,cc)
+ return Clstrs
+
+def pick(L,c):
+ """ computes L(c) """
+ x=[]
+ for i in range(len(c)):
+ x = x + [L[c[i]]]
+ return x
+
+def pick_not(L,c):
+ """ computes L(~c)"""
+ x = []
+ for i in range(len(L)):
+ if not i in c:
+ x = x + [L[i]]
+ return x
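+# Usage sketches (values assumed): with L = [10,20,30,40] and c = [0,2],
+## pick(L,c)       # -> [10,30]   (entries of L at the indices in c)
+## pick_not(L,c)   # -> [20,40]   (entries of L at the other indices)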
+
+def report_L(lst=[],v=0):
+ """lst must refer to original PO numbering"""
+ global _L_last
+ if lst == []:
+ return
+ for j in lst:
+ if _L_last[j] == -1: #means not reported yet
+ _L_last[j] = v
+ report_result(j,v)
+
+def report_s(s):
+ """s must refer to original PO numbering
+ Differs from above """
+ global _L_last
+ assert len(s) == len(_L_last), 'two lengths are not equal'
+ if s == []:
+ return
+ for j in range(len(s)):
+ if not _L_last[j] == s[j]: #means not reported yet
+ assert _L_last[j] == -1, 'j = %d, _L_last[j] = %d, s[j] = %d'%(j,_L_last[j],s[j])
+ if _L_last[j] == -1:
+ _L_last[j] = s[j]
+ report_result(j,s[j])
+
+
+def report_results(L,final_map=[],if_final=False):
+ global _L_last,t_iter_start,file_out
+ out = '\n@@@@ Time = %.2f: results = %s'%((time.time()- t_iter_start),sumsize(L))
+ print out
+ file_out.write(out + '\n')
+ file_out.flush()
+ for j in range(len(L)):
+ if not L[j] == _L_last [j]:
+ assert _L_last[j] == -1, '_L_last[j] = %d, L[j] = %d'%(_L_last[j],L[j])
+ report_result(j,L[j])
+ _L_last = list(L) #update _L_last
+## print 'report: _L_last = %s'%sumsize(_L_last)
+ print '\n'
+
+def report_result(POn, REn, final_map=[]):
+ if final_map == []:
+ print 'PO = %d, Result = %d: '%(POn, REn),
+ else:
+ print 'PO = %d, Result = %d: '%(final_map[POn], REn),
+
+
+def scorr_T(t=10000):
+ global smp_trace, scorr_T_done
+ if scorr_T_done:
+ return
+ scorr_T_done = 1
+ print 'Trying scorr_T (scorr -C 2, &scorr, &scorr -C 0)'
+ funcs = [eval('(pyabc_split.defer(abc)("scorr -C 2"))')]
+ funcs = funcs + [eval('(pyabc_split.defer(abc)("&get;&scorr;&put"))')]
+ funcs = funcs + [eval('(pyabc_split.defer(abc)("&get;&scorr -C 0;&put"))')]
+ funcs = create_funcs(slps,t)+funcs
+ mtds = sublist(methods,slps) + ['scorr2','&scorr','&scorr0']
+ best = n_ands()
+ abc('w %s_best_T.aig'%f_name)
+ name1 = '%s_sc1.aig'%f_name
+ if os.access(name1,os.R_OK):
+ os.remove(name1)
+ name2 = '%s_sc2.aig'%f_name
+ if os.access(name2,os.R_OK):
+ os.remove(name2)
+ name3 = '%s_sc3.aig'%f_name
+ if os.access(name3,os.R_OK):
+ os.remove(name3)
+ N=m_best = 0
+ for i,res in pyabc_split.abc_split_all(funcs):
+ if i == 0:
+ break
+ if i == 1:
+ abc('w %s_sc1.aig'%f_name)
+ print 'scorr: ',
+ ps()
+ N=N+1
+ if N == 3 or n_latches() == 0:
+ break
+ if i == 2 or n_latches() == 0:
+ abc('w %s_sc2.aig'%f_name)
+ print '&scorr: ',
+ ps()
+ N=N+1
+ if N == 3:
+ break
+ if i == 3 or n_latches() == 0:
+ abc('w %s_sc3.aig'%f_name)
+ print '&scorr0: ',
+ ps()
+ N=N+1
+ if N == 3:
+ break
+ if os.access(name1,os.R_OK):
+ abc('r %s'%name1)
+ if n_ands() < best:
+ best = n_ands()
+ m_best = 1
+ abc('w %s_best_T.aig'%f_name)
+ if os.access(name2,os.R_OK):
+ abc('r %s'%name2)
+ if n_ands() < best:
+ m_best = 2
+ best = n_ands()
+ abc('w %s_best_T.aig'%f_name)
+ if os.access(name3,os.R_OK):
+ abc('r %s'%name3)
+ if n_ands() < best:
+ m_best = 3
+ best = n_ands()
+ abc('w %s_best_T.aig'%f_name)
+ smp_trace = smp_trace + ['%s'%mtds[m_best]]
+ abc('r %s_best_T.aig'%f_name)
+
+def pscorr(t=900):
+ result = par_scorr(t)
+ if n_ands() == 0:
+ return result
+ else:
+ return 'UNDECIDED'
+
+def par_scorr(t=30,ratio = 1):
+ t_init = time.time()
+## abc('dr -m;drw')
+ abc('dretime;dc2')
+ funcs = [eval('(pyabc_split.defer(abc)("scorr -vq -F 1"))')]
+ funcs = funcs + [eval('(pyabc_split.defer(abc)("scorr -vq -F 2"))')]
+ funcs = funcs + [eval('(pyabc_split.defer(abc)("scorr -vq -F 4"))')]
+ funcs = funcs + [eval('(pyabc_split.defer(abc)("scorr -vq -F 8"))')]
+ funcs = funcs + [eval('(pyabc_split.defer(abc)("scorr -vq -F 16"))')]
+ funcs = create_funcs(slps,t)+funcs
+ mtds = sublist(methods,slps) + ['scorr1','scorr2', 'scorr4', 'scorr8', 'scorr16']
+ best = n_ands()
+ print 'par_scorr: best = %d'%best
+ abc('w %s_best.aig'%f_name)
+ idone = []
+ mtd = ''
+ for i,res in pyabc_split.abc_split_all(funcs):
+## print i,res
+ if i == 0: #timeout
+ break
+ else:
+ idone = idone + [i]
+ if n_ands() <= ratio * best:
+ best = n_ands()
+## print 'par_scorr: best = %d, method = %s'%(best, mtds[i])
+ abc('w %s_best.aig'%f_name)
+ if best == 0 or len(idone) >= 5:
+ mtd = mtds[i]
+ break
+ else:
+ break
+## print 'Time: %.2f'%(time.time() - t_init)
+ abc('r %s_best.aig'%f_name)
+## if best == 0:
+## print mtd
+ return mtd
+
+def par_scorr_q(t=10000,ratio = 1):
+ abc('dretime;dc2')
+ abc('bmc2 -T 5')
+ depth = n_bmc_frames()
+ mtds = funcs = []
+ n=1
+ while True:
+ funcs = funcs + [eval('(pyabc_split.defer(abc)("scorr -vq -F %d"))'%n)]
+ mtds = mtds + ['scorr%d'%n]
+ n = 2* n
+ if n > max(1,min(depth,16)):
+ break
+ funcs = create_funcs(slps,t)+funcs
+ mtds = sublist(methods,slps) + mtds
+ best = n_ands()
+ print 'best = %d'%best
+ abc('w %s_best.aig'%f_name)
+ idone = []
+ for i,res in pyabc_split.abc_split_all(funcs):
+## print i,res
+ if i == 0:
+ break
+ else:
+ idone = idone + [i]
+ if n_ands() <= ratio * best:
+ best = n_ands()
+ print 'best = %d, method = %s'%(best, mtds[i])
+ abc('w %s_best.aig'%f_name)
+ if best == 0 or len(idone) >= len(mtds)-1:
+ break
+ else:
+ break
+ abc('r %s_best.aig'%f_name)
+
+
+def indicate_0_pos(L2):
+ """
+ puts 0's in L2 where the corresponding output is driven by a const-0
+ """
+## assert n_pos() == len(L2), 'list L2=%d and n_pos=%d in current AIG dont match'%(len(L2),n_pos())
+ for j in range(n_pos()):
+ i=is_const_po(j)
+ if i == 0:
+ L2[j]=0
+ return L2
+
+def list_0_pos():
+ """
+ returns indices of outputs driven by const-0
+ """
+ L = []
+ for j in range(n_pos()):
+ i=is_const_po(j) #returns const value of PO if const. Else -1
+ if i == 0:
+ L = L + [j]
+ return L
+
+def mprove2(L=0,op='simple',t=100,nn=0):
+ global _L_last, f_name, skip_spec
+ print 'mprove2 entered' ,
+ if L == 0:
+ L = [-1]*n_pos()
+ ps()
+ print 'mprove2 entered with L = ',
+ print sumsize(L)
+ abc('w %s_mp2.aig'%f_name) #save aig before pos removed
+ old_f_name = f_name #we may call sp() which can change f_name
+ n = count_less(L,0)
+ ind = []
+ for j in range(len(L)):
+ if L[j] > -1:
+ ind = ind +[j]
+ if len(ind) == n_pos(): #all POs already solved
+ return 'ALL_SOLVED',L
+ remove(ind,-1) #remove solved POs
+ if len(ind)>0:
+ print 'Removed %d proved POs'%len(ind)
+ if n_pos() == 0:
+ f_name = old_f_name
+ abc('r %s_mp2.aig'%f_name)
+ return 'ALL_SOLVED',L
+ ps()
+ N = n_pos()
+ if N == 1: #only one PO left
+ v = -1
+ skip_spec_old = skip_spec
+ skip_spec = True
+ result = simple(2000,1)
+ ff_name = f_name #save f_name; sp() below may change it
+ result = sp(0,2000) #warning sp() can change f_name. 0 means simplify
+ f_name = ff_name
+ skip_spec = skip_spec_old
+ res = result[0]
+ print 'result of sp = ',
+ print res
+#### print result
+ if res == 'SAT':
+ v = 1
+ if res == 'UNSAT':
+ v = 0
+ i = L.index(-1)
+## print 'i=%d,v=%d,L=%s'%(i,v,str(L))
+ L[i] = v
+ f_name = old_f_name #if sp() changed f_name need to revert to old f_name
+ abc('r %s_mp2.aig'%f_name)
+ print 'reverting %s_mp2.aig'%f_name,
+ ps()
+ print sumsize(L)
+ if v > -1:
+ res = 'ALL_SOLVED'
+ return res,L
+ r = pre_simp()
+ NP = n_pos()/N
+ L1 = [-1]*n_pos()
+ Llst0 = []
+ if r[0] == Unsat:
+ L1 = [0]*N
+ else:
+ Llst0 = list_0_pos()
+ Llst0.sort()
+ print 'Llst0 = %s'%str(Llst0)
+ n_0 = len(Llst0)
+ if n_0 > 0:
+## print 'Found %d const-0 POs'%n_0
+ remove(Llst0,0)
+ print 'Removed %d const-0 POs'%len(Llst0)
+ if NP > 1: # we want to do iso here because more than one phase was found.
+ iso() # additional iso - changes number of POs
+ map3 = create_map(eq_classes(),N) #creates map into previous
+## tb = min(n_pos(),20)
+ N = n_pos()
+ tb = min(N,50)
+## print 'Trying par_multi_sat for %d sec.'%tb
+ S,lst1,s = par_multi_sat(tb,1,1,0) #this gives a list of SAT POs
+ L2 = s10 = s
+ n_solved = n_pos() - count_less(s10,0)
+ if 1 in s10 or 0 in s10: #something solved
+ if n_solved < N: #not all solved
+ rem = indices(s,0)+indices(s,1)
+ rem.sort()
+ remove(rem,1)
+ """ if lst1 > 1 element, simplify and run par_multi_sat again to get lst2
+ then merge lst1 and lst2 appropriately to create new lst1 for below.
+ """
+ tb = tb+25
+ gap = max(15,.2*tb)
+ if len(rem) > 0:
+ s210 = s10
+ #iterate here as long as more than 1 PO is found SAT
+ n_solved = n_pos() - count_less(s210,0)
+ while n_solved > 0:
+ gap = int(1+1.2*gap)
+ print 'gap = %.2f'%gap
+ pre_simp(1) #warning this may create const-0 pos
+ S,lst2,s = par_multi_sat(tb,gap,1,0) #this can find UNSAT POs
+ s210 = s
+ n_solved = n_pos() - count_less(s210,0)
+ s10 = put(s210,list(s10)) #put the new values found into s10
+ if count_less(s10,0) == 0 or n_solved == 0: #all solved or nothing solved
+ break #s10 has the results
+ else:
+ out = '\n@@@@ Time = %.2f: additional POs found SAT = %d'%((time.time()- t_iter_start),len(lst2))
+ print out
+ file_out.write(out + '\n')
+ file_out.flush()
+ rem = indices(s210,0)+indices(s210,1)
+ rem.sort()
+ remove(rem,1) #this zeros the l210 and then removes ALL const-1 POs.
+ #If there are more than lst2 removed, it fires an assertion.
+ continue
+ L2 = s10 #put results in s10
+ else: #lst1 is empty or S == 'SAT'
+ print 'no cex found or S = UNSAT'
+ else: #all solved
+ print 'all POs solved'
+ print 'Removed %d solved POs'%(len(s10) - count_less(s10,0))
+ else:
+ print 'nothing solved'
+ write_file('bmc2')
+ if -1 in s10:
+ print 'Entering solve_all ',
+ ps()
+ S,s210 = solve_all([-1]*n_pos(),900) #solve_all calls sp() or simple but preserves the aig and f_name
+## else: zzz
+ if -1 in s210: #then no POs were solved by solve_all
+## abc('r %s_smp2_2.aig'%f_name)
+ if n_pos() < 50:
+ print 'Entering mprove with %d sec. for each cone'%t,
+ ps()
+ print 'L2 before mprove: %s'%sumsize(L2)
+ s210 = mprove([-1]*n_pos(),op,t) #proving each output separately
+ else:
+ s210 = [-1]*n_pos()
+ print 's210 after mprove and before inject 1 %s:'%sumsize(s210)
+ L2 = put(s210,s10)
+ print 'L2 after inject 1 %s:'%sumsize(L2)
+ else: #all POs solved
+ L2 = s10
+ assert NP == 1, 'NP > 1: ERROR'
+ if NP>1:
+ print 'NP = %d'%NP
+ print 'L1 before unmap3: %s'%sumsize(L1) #L1 should be all -1's of length before iso
+ L1 = unmap(list(L1),L2,map3)
+ print 'L1 after unmap of map3: ',
+ print sumsize(L1)
+ else:
+ print 'L2 = %s'%str(L2)
+ L1 = L2
+ L1 = inject(list(L1),Llst0,0)
+ print 'L1 after inject of Llst0 0s: %s:'%sumsize(L1)
+ if NP >1:
+ L1 = check_and_trim_L(NP,L1)
+ assert len(L1)<=len(L),"L1 = %d larger than L = %d"%(len(L1),len(L))
+## print 'L = %s'%str(L)
+ L = insert(L1,list(L)) # replace -1s in L with values in L1. Size of L1<=L L is really L2
print sumsize(L)
+ f_name = old_f_name
+ abc('r %s_mp2.aig'%f_name) #restore aig
+ if 1 in L:
+ S = 'SAT'
+ if not -1 in L:
+ S = 'ALL_PROVED'
+ return S,L
+
+def merge(L1,L2,n=0):
+ """L2 refers to POs that were solved after POs in L1 were removed
+ modifies L2 to refer to the original POs.
+ if n=0 adds in L1 and sorts
+ """
+ if L1 == []:
+ return L2
+ if L2 == []:
+ if n == 0:
+ return L1
+ else:
+ return []
+ m = max(L1)
+ LL1 = L1 + [3+m+max(L2)] #this makes sure that there are enough gaps
+ g = gaps(LL1)
+## print g
+ L = []
+ for i in range(len(L2)):
+ l2i=L2[i]
+ assert l2i < len(g),'ERROR, L2 = %s,g = %s'%(str(L2),str(g))
+ L = L + [g[l2i]]
+## print L
+ if n == 0:
+ L = L + L1
+ L.sort()
+ return L #L is already sorted
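+# Illustrative example (not part of the original script): if POs 1 and 3 were
+# removed, merge([1,3],[0,2]) maps surviving indices 0 and 2 back to original
+# indices 0 and 4 and returns [0,1,3,4]; merge([1,3],[0,2],1) returns just [0,4].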
+
+def put(s2,s11):
+ """ put in the values of s2 into where there are -1's in s1 into s1
+ return s2 """
+ s1 = list(s11)
+ k = 0
+ assert len(s2) == count_less(s1,0),'mismatch in put'
+ for j in range(len(s1)):
+ if s1[j] == -1:
+ s1[j] = s2[k]
+ k=k+1
+ return s1
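+# Illustrative example (not part of the original script):
+#   put([0,1],[-1,5,-1])  ->  [0,5,1]   (the -1 slots are filled in order)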
+
+def gaps(L1):
+ """L2 refers to POs that were solved after POs in L1 were removed
+ modifies L2 to refer to the original POs.
+ if n=0 adds in L1 and sorts
+ """
+ if L1 == [] or max(L1)+1 == len(L1):
+ return []
+ L1_gaps = []
+ i=0
+ for j in range(len(L1)):
+ lj=L1[j]
+## print lj,i
+ if lj == i:
+ i = i+1
+ continue
+ assert lj > i,'Error'
+ while lj > i:
+ L1_gaps = L1_gaps + [i]
+ i = i+1
+## print L1_gaps
+ i=i+1
+ return L1_gaps
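+# Illustrative example (not part of the original script), assuming L1 is sorted:
+#   gaps([1,3,8])  ->  [0,2,4,5,6,7]   (indices below max(L1) missing from L1)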
+
+## j=i=0
+## L = []
+## L1.sort()
+## L2.sort()
+## LL1 = L1 + [10000000] #make sure list L2 is processed to the end
+#### print 'L1 and L2 is sorted %d, %d: '%(len(L1),len(L2))
+## if not L2 == []:
+## while True:
+## ## print i,j
+## if LL1[i] <= L2[j]:
+## i = i+1
+## else:
+## L= L + [L2[j] + i]
+## ## print L
+## j = j+1
+## if j == len(L2):
+## break
+## if n == 0:
+## L = L + L1
+## L.sort()
+## return L #L is already sorted
+
+
+def solve_all(L2,t):
+ global f_name, skip_spec
+ abc('w %s_solve_all.aig'%f_name)
+ old_f_name = f_name
+## abc('orpos')
+## print 'solve_all for %.2f sec.: '%t,
+## ps()
+ skip_spec_old = skip_spec
+ skip_spec = True
+ tt = max(t,900)
+ print 'Entering simple for %d sec.'%tt
+ result = simple(tt,1) #### temporary 1 means do not simplify
+## result = sp(0,t) #warning sp() may change f_name
+ skip_spec = skip_spec_old
+## print 'solve_all: result = %s'%result
+ if result[0] == 'UNSAT':
+ L2 = [0]*len(L2)
+ f_name = old_f_name
+ abc('r %s_solve_all.aig'%f_name)
+ return result[0],L2
+
+def inject(L,lst,v):
+ """
+ expands L by len(lst): puts value v at each position listed in lst,
+ preserving the original values of L
+ """
+ k = i = j = 0 #i indexes L, j indexes lst, and k is total length of LL
+ if lst == []:
+ return L
+ LL = []
+ N = len(L) + len(lst)
+ while True:
+ if lst[j] == k:
+ LL= LL + [v]
+ if j < len(lst)-1:
+ j = j+1
+ else:
+ LL = LL + [L[i]]
+ if i < len(L)-1:
+ i = i+1
+ k = k+1
+ if k >= N:
+ break
+ return LL
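+# Illustrative example (not part of the original script):
+#   inject([7,8],[0,2],-1)  ->  [-1,7,-1,8]   (-1 placed at positions 0 and 2)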
+
+def insert(L1,L):
+ """ insert L1 in L and return L"""
+ k=0
+ for j in range(len(L)):
+ if L[j] > -1:
+ continue
+ else:
+ L[j] = L1[k]
+ k = k+1
+ if k >= len(L1):
+ break
+ return L
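+# Illustrative example (not part of the original script):
+#   insert([4,5],[-1,0,-1,1])  ->  [4,0,5,1]   (the -1 entries are replaced in order)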
+
+
+
+def duplicate_values(L1,NP):
+ """ append values """
+## L=L1*NP
+ L = L1
+ for j in range(NP-1):
+ L = L+[-1]*len(L1)
return L
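+# Illustrative example (not part of the original script):
+#   duplicate_values([0,-1],3)  ->  [0,-1,-1,-1,-1,-1]   (two extra phases of -1)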
-def check_L(NP,L):
+##def duplicate_values2(L1,NP):
+## """ interleave values """
+## L = []
+## for j in range(len(L1)):
+## v = L1[j]
+## L = L + [v]*NP
+## return L
+
+def check_and_trim_L(NP,L):
"""This happens when an unrolling creates additional POs
- We want to check that L[j] = L[j+N] etc to make sure the PO results agree
- in all phases, i.e. sat, unsat, or undecided
+ We want to check that L[j] = L[j+kN] etc. to make sure the PO results agree
+ in all phases, i.e. sat, unsat, or undecided. If any phase is sat, PO j is marked sat.
+ If one phase is unsat, then all phases must be unsat; otherwise L[j] is set to -1.
+ Return the first N entries of L.
"""
N = len(L)/NP #original number of POs
for j in range(N):
+ if L[j] == 1:
+ continue
for k in range(NP)[1:]: #k = 1,2,...,NP-1
- if (L[j] == 1):
- break
- elif L[j+k*N] == 1:
+ if L[j+k*N] == 1:
L[j] = 1
break
elif L[j] == -1:
continue #we have to continue to look for a 1
elif L[j] == 0:
if L[j+k*N] == -1:
+ print 'some copies of PO unsat and some undecided'
L[j] = -1
break
continue #have to make sure that all phases are 0
return L[:N]
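+# Illustrative example (not part of the original script): with NP=2 phases of N=3 POs,
+#   check_and_trim_L(2,[0,-1,1,-1,1,0])  ->  [-1,1,1]
+# PO 0 is unsat in one phase but undecided in the other, so it drops back to -1.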
+def pass_down(L,L1,map):
+ """map maps L into L1.Populate L1 with values in L"""
+## print len(L),len(L1),len(map),max(map)
+## print sumsize(L)
+## print sumsize(L1)
+ for j in range(len(map)):
+ if L[j] == -1:
+ continue
+ assert L1[map[j]] == -1 or L1[map[j]] == L[j], 'L1=%d, L = %d'%(L1[map[j]],L[j])
+ L1[map[j]] = max(L[j],L1[map[j]])
+ return L1
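+# Illustrative example (not part of the original script):
+#   pass_down([1,-1,0],[-1,-1,-1,-1],[2,0,1])  ->  [-1,0,1,-1]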
+
+def mpr():
+ tt = time.time()
+ N=n_pos()
+ r = pre_simp()
+ if r == Unsat:
+ L = [0]*N
+ else:
+ L = mprove([-1]*n_pos(),'simple',100)
+ L = L[:N]
+ print sumsize(L)
+ print 'Time = %.2f'%(time.time() - tt)
+ return L
+
-def mprove(L,op='sp',tt=1000):
+def mprove(L,op='simple',tt=1000):
""" 0 = unsat, 1 = sat, -1 = undecided"""
global max_bmc, init_initial_f_name, initial_f_name,win_list, last_verify_time
- global f_name_save,name_save,temp_dec
+ global f_name_save, nam_save, temp_dec, f_name
+ f_name_save = f_name
+ nam_save = '%s_mp_save.aig'%f_name
+ abc('w %s'%nam_save)
N = len(L)
+ print 'Length L = %d, n_pos() = %d'%(len(L),n_pos())
t = tt #controls the amount of time spent on each cone
funcs = [eval('(pyabc_split.defer(%s)())'%op)]
funcs = create_funcs(slps,t)+funcs
- mtds =sublist(methods,slps) + [op]
+ mtds = sublist(methods,slps) + [op]
res = L
- for j in range(N):
+ NN = count_less(L,0)
+ rr = range(N)
+ rr.reverse()
+ init_name = init_initial_f_name
+ for j in rr:
if L[j] > -1:
continue #already solved
- abc('r %s'%name_save) #restore original function
+ print '\n************** No. Outputs = %d ******************************'%NN
+ abc('r %s'%nam_save) #restore original function
+## ps()
x = time.time()
- name = '%s_cone_%d'%(f_name_save,j)
- abc('cone -s -O %d'%j)
- abc('w %s.aig'%name)
- print '\n********************************************'
- read_file_quiet(name)
+ name = '%s_cone_%d.aig'%(f_name,j)
print '________%s(%s)__________'%(op,name)
+ abc('cone -s -O %d;scl'%j)
+ abc('w %s_cone.aig'%f_name)
+## ps()
+ read_file_quiet_i('%s_cone.aig'%f_name) #needed to reset initial settings
+## ps()
temp_dec = False
- fork_last(funcs,mtds)
+ i,result = fork_last(funcs,mtds)
+## print '\ni = %d, result = %s'%(i,str(result))
+ f_name = f_name_save #restore original f_name
T = '%.2f'%(time.time() - x)
out = get_status()
- if out == Unsat:
+## print '\nout= %d, result = %s'%(out,str(result))
+ rslt = Undecided
+ if not out == result:
+ print 'out = %s, result = %s'%(out,result)
+## assert out == result,'out = %d, result = %d'%(out,result)
+ if out == Unsat or result == 'UNSAT' or result == Unsat:
res[j] = 0
+ rslt = Unsat
if out < Unsat:
res[j] = 1
- print '\n%s: %s in time = %s'%(name,RESULT[out],T)
+ rslt = Sat
+ print '\n%s: %s in time = %s'%(name,RESULT[rslt],T)
+ abc('r %s'%nam_save) #final restore of original function for second mprove if necessary.
+ init_initial_f_name = init_name
+## print res
return res
-def sp1(options = ''):
- global sec_options
- sec_options = options
- return super_prove(1)
+##def sp1(options = ''):
+## global sec_options
+## sec_options = options
+## return super_prove(1)
-def super_prove(n=0):
+def super_prove(n=0,t=900):
"""Main proof technique now. Does original prove and if after speculation there are multiple output left
if will try to prove each output separately, in reverse order. It will quit at the first output that fails
to be proved, or any output that is proved SAT
@@ -3174,35 +5157,43 @@ def super_prove(n=0):
if n == 2 just do quick simplification instead of full simplification, then abs first, spec second
"""
global max_bmc, init_initial_f_name, initial_f_name,win_list, last_verify_time, f_name
- init_initial_f_name = initial_f_name
+## print 'sec_options = %s'%sec_options
+## init_initial_f_name = initial_f_name
+ size = str([n_pis(),n_pos(),n_latches(),n_ands()])
+ add_trace('[%s: size = %s ]'%(f_name,size))
if x_factor > 1:
print 'x_factor = %f'%x_factor
input_x_factor()
max_bmc = -1
x = time.time()
-## if n == 2:
-## result = prove(2)
-## else:
-## result = prove(0)
+ add_trace('prove')
result = prove(n)
+ print 'prove result = ',
+ print result
+ tt = time.time() - x
if ((result == 'SAT') or (result == 'UNSAT')):
- print '%s: total clock time taken by super_prove = %f sec.'%(result,(time.time() - x))
- return result
+ print '%s: total clock time taken by super_prove = %0.2f sec.'%(result,tt)
+ add_trace('%s'%result)
+ add_trace('Total time = %.2f'%tt)
+ print m_trace
+ return [result]+[m_trace]
elif ((result == 'UNDECIDED') and (n_latches() == 0)):
- return result
- print '%s: total clock time taken by super_prove so far = %f sec.'%(result,(time.time() - x))
+ add_trace('%s'%result)
+ add_trace('Total time = %.2f'%tt)
+ print m_trace
+ return [result]+[m_trace]
+ print '%s: total clock time taken by super_prove so far = %0.2f sec.'%(result,(time.time() - x))
y = time.time()
- if n == 2:
- print 'Entering BMC_VER()'
- result = BMC_VER() #n=2 is only called from sp2, a super_prove run in parallel.
- if ((result == 'SAT') and (('_abs' in f_name)or '_spec' in f_name)): #this is because we have done an abstraction and cex is invalid.
- result = 'UNDECIDED'
- else:
- print 'Entering BMC_VER_result'
- result = BMC_VER_result() #this does backing up if cex is found
- print 'Total clock time taken by last gasp verification = %f sec.'%(time.time() - y)
- print 'Total clock time for %s = %f sec.'%(init_initial_f_name,(time.time() - x))
- return result
+ print 'Entering BMC_VER_result'
+ add_trace('BMC_VER_result')
+ result = BMC_VER_result() #this does backing up if cex is found
+ print 'Total clock time taken by last gasp verification = %0.2f sec.'%(time.time() - y)
+ tt = time.time() - x
+ print 'Total clock time for %s = %0.2f sec.'%(init_initial_f_name,tt)
+ add_trace('%s'%result)
+ add_trace('Total time for %s = %.2f'%(init_initial_f_name,tt))
+## print m_trace
+ return [result]+[m_trace]
def reachm(t):
x = time.clock()
@@ -3258,6 +5249,7 @@ def execute_op(op,L,t):
"""
run the files in the list L using operation "op", each for max time = t
"""
+ global res
funcs = [eval('(pyabc_split.defer(%s)())'%op)]
funcs = create_funcs(slps,t)+funcs
mtds =sublist(methods,slps) + [op]
@@ -3266,7 +5258,7 @@ def execute_op(op,L,t):
x = time.time()
name = L[j]
print '\n\n\n\n________%s__________'%op
- read_file_quiet(name)
+ read_file_quiet_i(name)
m,result = fork_last(funcs,mtds)
if result == Undecided:
result = RESULT[result]
@@ -3286,10 +5278,24 @@ def x_ops(ops,L,t):
return result
def iso(n=0):
+ if n_ands() > 500000:
+ return False
+ if n_pos() < 2:
+ print 'no more than 1 output'
+ return False
+ npos=n_pos()
if n == 0:
- run_command('&get;&iso;&put')
+ abc('&get;&iso -q;&put')
+ if n_pos() == npos:
+ print 'no reduction'
+ return False
else:
run_command('&get;&iso;iso;&put')
+ if n_pos() == npos:
+ print 'no reduction'
+ return False
+ print 'Reduced n_pos from %d to %d'%(npos,n_pos())
+ return True
def check_iso(N):
ans = get_large_po()
@@ -3335,7 +5341,7 @@ def get_large_po():
def scorro():
run_command('scorr -o')
- l = remove_const_pos()
+ l = remove_const_pos(0)
ps()
def drw():
@@ -3345,7 +5351,6 @@ def drw():
def dc2rs():
abc('dc2rs')
ps()
-
def reachn(t):
x = time.clock()
@@ -3353,16 +5358,16 @@ def reachn(t):
print 'reachm3 done in time = %f'%(time.clock() - x)
return get_status()
-def reachx(t):
+def reachx(t=900):
x = time.time()
abc('reachx -t %d'%t)
print 'reachx done in time = %f'%(time.time() - x)
return get_status()
-def reachy(t):
+def reachy(t=900):
x = time.clock()
abc('&get;&reachy -v -T %d'%t)
- print 'reachy done in time = %f'%(time.clock() - x)
+## print 'reachy done in time = %f'%(time.clock() - x)
return get_status()
def create_funcs(J,t):
@@ -3393,16 +5398,18 @@ def modify_methods(J,dec=0):
N = bmc_depth()
L = n_latches()
I = n_real_inputs()
- npr = n_proc -dec
+ npr = n_proc - dec
+ reachi = reachs
if 18 in J: #if sleep in J add 1 more processor
npr = npr+1
- if ( ((I+L<350)&(N>100)) or (I+L<260) or (L<80) ):
- if not 24 in J: #24 is reachs
- J = J+[24] # add all reach methods
+ if ( ((I+L<550)&(N>100)) or (I+L<400) or (L<80) ):
+ if not 24 in J: #24 is reachy
+ if L < 70 and not 4 in reachs:
+ reachi = [4]+reachs #[4] = reachx
+ J = reachi+J # add all reach methods
if len(J)>npr:
J = remove_intrps(J) #removes only if len(J)<n_processes
if len(J)< npr: #if not using all processors, add in pdrs
- #modify allpdrs to reflect ifbip (see set_engines)
for j in range(len(allpdrs)):
if allpdrs[j] in J: #leave it in
continue
@@ -3410,19 +5417,18 @@ def modify_methods(J,dec=0):
J = J + [allpdrs[j]]
if len(J) == npr:
break
-## if L < 80:
-## if ((not 4 in J) and len(J) < n_proc):
-## J = J + [4]
+ if len(J)>npr:
+ J = remove_intrps(J)
return J
def BMC_VER():
""" a special version of BMC_VER_result that just works on the current network
Just runs engines in parallel - no backing up
"""
- global init_initial_f_name, methods, last_verify_time, n_proc
+ global init_initial_f_name, methods, last_verify_time, n_proc,last_gasp_time
xt = time.time()
result = 5
- t = max(2*last_verify_time,10000) ####
+ t = max(2*last_verify_time,last_gasp_time) ####
print 'Verify time set to %d'%t
J = slps + pdrs + bmcs + intrps
J = modify_methods(J)
@@ -3434,21 +5440,23 @@ def BMC_VER():
print 'BMC_VER result = %s'%result
return result
-
-def BMC_VER_result():
- global init_initial_f_name, methods, last_verify_time,f_name
+def BMC_VER_result(t=0):
+## return 'UNDECIDED' #TEMP
+ global init_initial_f_name, methods, last_verify_time,f_name,last_gasp_time
xt = time.time()
result = 5
abc('r %s.aig'%f_name)
- print '\n***Running proof on %s :'%f_name,
+ abc('scl')
+ print '\n***Running proof on %s after scl:'%f_name,
ps()
- t = max(2*last_verify_time,10000) #each time a new time-out is set t at least 1000 sec.
+ if t == 0:
+ t = max(2*last_verify_time,last_gasp_time) #each time a new time-out is set t at least 1000 sec.
print 'Verify time set to %d'%t
- X = pyabc_split.defer(abc)
- J = slps + pdrs + bmcs + intrps
+ J = slps + allpdrs2 + bmcs + intrps + sims
last_name = seq_name(f_name).pop()
- if 'smp' == last_name: # then we try harder to prove it.
- J = slps + intrps + allpdrs + [2]
+ if not last_name in ['abs','spec']:
+ J = slps +allpdrs2 +bmcs + intrps + sims
+## if 'smp' == last_name or last_name == f_name: # then we try harder to prove it.
J = modify_methods(J) #if # processors is enough and problem is small enough then add in reachs
F = create_funcs(J,t)
mtds = sublist(methods,J)
@@ -3457,16 +5465,24 @@ def BMC_VER_result():
result = get_status()
if result == Unsat:
return 'UNSAT'
- if last_name == 'smp' : # can't backup so just return result
+## if last_name == 'smp' or last_name == f_name: # can't backup so just return result
+ if not last_name in ['abs','spec']:
if result < Unsat:
return 'SAT'
- if result > Unsat: #still undefined
+ if result > Unsat: #still undecided
return 'UNDECIDED'
else: # (last_name == 'spec' or last_name == 'abs') - the last thing we did was an "abstraction"
if result < Unsat:
+ if last_name == 'abs':
+ add_trace('de_abstract')
+ if last_name == 'spec':
+ add_trace('de_speculate')
f_name = revert(f_name,1) # revert the f_name back to previous
abc('r %s.aig'%f_name)
+ abc('scl')
return BMC_VER_result() #recursion here.
+ else:
+ return 'UNDECIDED'
def try_split():
abc('w %s_savetemp.aig'%f_name)
@@ -3500,6 +5516,7 @@ def prove_all_ind():
return
abc('w %s_osavetemp.aig'%f_name)
lst = range(n_pos())
+## lst.reverse()
## list.reverse()
## for j in list[1:]:
for j in lst:
@@ -3526,7 +5543,7 @@ def prove_all_ind():
print '-%d'%j,
else:
print '*%d'%j,
- l=remove_const_pos()
+ l=remove_const_pos(0)
n_pos_proved = n_pos_proved + n_proved
print '\nThe number of POs reduced from %d to %d'%(N,n_pos())
print 'n_pos_proved = %d'%n_pos_proved
@@ -3544,7 +5561,7 @@ def remove_iso(L):
zero(lst)
n_pos_proved = n_pos_proved + count_less(lst,n_pos_before - n_pos_proved)
print 'The number of POs removed by iso was %d'%len(lst)
- l=remove_const_pos() #can an original PO be zero?
+ l=remove_const_pos(0) #can an original PO be zero?
def prove_all_iso():
"""Tries to prove output k by isomorphism. Gets number of iso-eq_classes as an array of lists.
@@ -3558,22 +5575,9 @@ def prove_all_iso():
print 'n_pos_proved = %d'%n_pos_proved
## run_command('&get;&iso;&put')
abc('&get;&iso')
- L = iso_eq_classes()
+ L = eq_classes()
## print L
remove_iso(L)
-## lim = n_pos_before - n_pos_proved
-## for j in range(len(L)):
-## ll = L[j]
-## if len(ll) == 1:
-## continue
-## if not ll[0] < lim:
-## continue
-## else:
-## n = count_less(ll[1:], lim) #drop the first since it is the representative.
-## print n
-## n_proved = n_proved + n
-## print n, n_proved
-## n_pos_proved = n_pos_proved + n_proved
print '\nThe number of POs reduced by iso was from %d to %d'%(N,n_pos())
def count_less(L,n):
@@ -3611,14 +5615,14 @@ def prove_all_mtds(t):
abc('w %s_osavetemp.aig'%f_name) #if changed, store it permanently
print '%d'%j,
assert not is_sat(), 'one of the POs is SAT' #we can do better than this
- l=remove_const_pos()
+ l=remove_const_pos(0)
print '\nThe number of POs reduced from %d to %d'%(N,n_pos())
#return status
def prove_all_pdr(t):
"""Tries to prove output k by pdr, using other outputs as constraints. If ever an output is proved
it is set to 0 so it can't be used in proving another output to break circularity.
- Finally all zero'ed ooutputs are removed. """
+ Finally all zero'ed outputs are removed. """
N = n_pos()
## l=remove_const_pos()
print '0 valued output removal changed POs from %d to %d'%(N,n_pos())
@@ -3638,14 +5642,14 @@ def prove_all_pdr(t):
abc('zeropo -N %d'%j)
abc('w %s_osavetemp.aig'%f_name) #if changed, store it permanently
print '%d'%j,
- l=remove_const_pos()
+ l=remove_const_pos(0)
print '\nThe number of POs reduced from %d to %d'%(N,n_pos())
#return status
def prove_each_ind():
"""Tries to prove output k by induction, """
N = n_pos()
- l=remove_const_pos()
+ l=remove_const_pos(0)
print '0 valued output removal changed POs from %d to %d'%(N,n_pos())
abc('w %s_osavetemp.aig'%f_name)
list = range(n_pos())
@@ -3662,7 +5666,7 @@ def prove_each_ind():
abc('zeropo -N %d'%j)
abc('w %s_osavetemp.aig'%f_name) #if changed, store it permanently
print '%d'%j,
- l=remove_const_pos()
+ l=remove_const_pos(0)
print '\nThe number of POs reduced from %d to %d'%(N,n_pos())
#return status
@@ -3670,7 +5674,7 @@ def prove_each_pdr(t):
"""Tries to prove output k by PDR. If ever an output is proved
it is set to 0. Finally all zero'ed ooutputs are removed. """
N = n_pos()
- l=remove_const_pos()
+ l=remove_const_pos(0)
print '0 valued output removal changed POs from %d to %d'%(N,n_pos())
abc('w %s_osavetemp.aig'%f_name)
list = range(n_pos())
@@ -3685,7 +5689,7 @@ def prove_each_pdr(t):
abc('zeropo -N %d'%j)
abc('w %s_osavetemp.aig'%f_name) #if changed, store it permanently
print '%d'%j,
- l=remove_const_pos()
+ l=remove_const_pos(0)
print '\nThe number of POs reduced from %d to %d'%(N,n_pos())
#return status
@@ -3693,7 +5697,7 @@ def disprove_each_bmc(t):
"""Tries to prove output k by PDR. If ever an output is proved
it is set to 0. Finally all zero'ed ooutputs are removed. """
N = n_pos()
- l=remove_const_pos()
+ l=remove_const_pos(0)
print '0 valued output removal changed POs from %d to %d'%(N,n_pos())
abc('w %s_osavetemp.aig'%f_name)
list = range(n_pos())
@@ -3708,46 +5712,63 @@ def disprove_each_bmc(t):
abc('zeropo -N %d'%j)
abc('w %s_osavetemp.aig'%f_name) #if changed, store it permanently
print '%d'%j,
- l=remove_const_pos()
+ l=remove_const_pos(0)
print '\nThe number of POs reduced from %d to %d'%(N,n_pos())
#return status
+def add_pord(s):
+ global pord_trace
+ pord_trace = pord_trace + [s]
+
def pord_1_2(t):
""" two phase pord. First one tries with 10% of the time. If not solved then try with full time"""
- global n_pos_proved, ifpord1, pord_on
+ global n_pos_proved, ifpord1, pord_on, pord_trace
+ #first eliminate easy POs
+ ttt = n_ands()/1000
+ if ttt < 100:
+ ttt=100
+ elif ttt<200:
+ ttt = 200
+ elif ttt< 300:
+ ttt = 300
+ else:
+ ttt = 500
+ S,lst,L = par_multi_sat(ttt,1,1,1)
+ lst = indices(L,1)
+ if 1 in L:
+ return [Sat]+[['par_multi_sat: SAT']]
+ if -1 in L:
+ restrict_v(L,-1)
+ else: return [Unsat] + [['par_multi_sat: UNSAT']]
+ pord_trace = []
pord_on = True # make sure that we do not reparameterize after abstract in prove_2
n_pos_proved = 0
if n_pos()<4:
- return Undecided
+ return [Undecided] +[pord_trace]
if ifpord1:
+ add_pord('pord1')
+ t_time = .1*t
print 'Trying each output for %0.2f sec'%(.1*t)
result = pord_all(.1*t) #we want to make sure that there is no easy cex.
if (result <= Unsat):
- return result
- ifpord1 = 0
- print 'Trying each output for %0.2f sec'%t
- #might consider using iso before the second pass of pord_all
- result = pord_all(t+2*G_T) #make sure there is enough time to abstract
- pord_on = False #done with pord
- return result
-
-def pord_all(t):
+ return [result] + [pord_trace]
+ return [Undecided] + [pord_trace]
+
+def pord_all(t,n=4):
"""Tries to prove or disprove each output j by PDRM BMC3 or SIM. in time t"""
- global cex_list, n_pos_proved, last_cx, pord_on, ifpord1
- print 'last_cx = %d'%last_cx
+ global cex_list, n_pos_proved, last_cx, pord_on, ifpord1,pord_trace
+ print 'last_cx = %d, time = %0.2f'%(last_cx,t)
btime = time.time()
N = n_pos()
prove_all_ind() ############ change this to keep track of n_pos_proved
nn = n_pos()
- abc('w %s_osavetemp.aig'%f_name)
- if nn < 4: #Just cut to the chase immediately.
+ abc('w %s_osavetemp.aig'%f_name)
+ if nn < n or nn*t > 300: #Just cut to the chase immediately.
return Undecided
lst = range(n_pos())
proved = disproved = []
abc('&get') #using this space to save original file.
### Be careful that & space is not changed.
-## with redirect.redirect( redirect.null_file, sys.stdout ):
-## with redirect.redirect( redirect.null_file, sys.stderr ):
cx_list = []
n_proved = 0
lcx = last_cx + 1
@@ -3756,6 +5777,7 @@ def pord_all(t):
n_und = 0
for j in lst:
print '\ncone %s. '%j,
+ abc('&r -s %s_osavetemp.aig'%f_name) #for safety
abc('&put; cone -s -O %d'%j) #puts the &space into reg-space and extracts cone j
#requires that &space is not changed. &put resets status. Use &put -s to keep status
abc('scl -m')
@@ -3763,11 +5785,6 @@ def pord_all(t):
## print 'running sp2'
###
result = run_sp2_par(t)
-## J = slps+JV
-## result = verify(J,t)
-## result = RESULT[result]
-## ###
-## print 'run_sp2_par result is %s'%result
if result == 'UNDECIDED':
n_und = n_und + 1
status = Undecided
@@ -3799,9 +5816,11 @@ def pord_all(t):
return Sat
else:
n_pos_proved = n_pos_proved + n_proved
+ if nn == n_pos_proved:
+ return Unsat
abc('r %s_osavetemp.aig'%f_name)
## abc('&put') # returning original to work spece
- remove(proved)
+ remove(proved,0)
print '\nThe number of unproved POs reduced from %d to %d'%(N,n_pos()),
ps()
if n_pos() > 0:
@@ -3816,19 +5835,474 @@ def bmc_ss(t):
"""
global cex_list
x = time.time()
- tt = min(10,max(1,.05*t))
- abc('bmc3 -T %0.2f'%tt)
- N = n_bmc_frames()
- if N <= max_bmc:
- return Undecided
-## print bmc_depth()
-## abc('bmc3 -C 1000000 -T %f -S %d'%(t,int(1.5*max(3,max_bmc))))
- run_command('bmc3 -vs -C 1000000 -T %f -S %d'%(t,2*N))
+ abc('bmc3 -a -C 1000000 -T %f'%(t))
if is_sat():
cex_list = cex_get_vector() #does this get returned from a concurrent process?
n = count_non_None(cex_list)
- print '%d cexs found in %0.2f sec at frame %d'%(n,(time.time()-x),cex_frame())
- return get_status()
+ L = list_non_None(cex_list)
+ print '%d cexs found in %0.2f sec'%(n,(time.time()-x))
+## remove_disproved_pos(cex_list)
+ else:
+ L = []
+ return L
+
+def iso_slp(t=30):
+ F = [eval('(pyabc_split.defer(sleep)(t))')]
+ F = F + [eval('(pyabc_split.defer(iso)())')]
+ for i,res in pyabc_split.abc_split_all(F):
+ if i == 0:
+ return
+
+##def iter_par_multi_sat(t=10,m=1):
+## while True:
+## abc('w %s_save.aig'%f_name)
+## S,lst1 = par_multi_sat(t,m) #run 3 engines in parallel looking for SAT outputs
+## lst1.sort()
+## print 'Found %d SAT POs'%len(lst1)
+## abc('r %s_save.aig'%f_name)
+## if len(lst1)==0:
+## break
+## remove(lst1,1)
+## pre_simp(1,1)
+## iso()
+
+def show_partitions(L):
+ for i in range(len(L)):
+ abc('&r -s %s.aig'%L[i])
+ print '\nSize = ',
+ run_command('&ps')
+ abc('&popart')
+ eqs = eq_classes()
+ N = len(eqs)
+ print 'No. of partitions = %d'%N
+ if N == 1:
+ continue
+ l = []
+ for j in range(N):
+ l=l + [len(eqs[j])]
+ print l
+
+
+def r_part(name):
+ read_file_quiet_i(name)
+ abc('&get;&scl;&scorr -C 2;&put')
+ res1 = reparam()
+ res2 = False
+ npos = n_pos()
+## if n_pos() < 100:
+## res2 = iso()
+## ps()
+ if n_pos() < 1000:
+ iso()
+ if n_pos() < 500:
+ abc('r %s.aig'%name)
+ abc('w %s_leaf.aig'%name)
+ return
+## abc('w %s_leaf.aig'%name)
+## return
+ res = two_eq_part()
+ if res == False:
+ abc('r %s.aig'%name)
+ abc('w %s_leaf.aig'%name)
+ return
+ elif min(res) < .2*max(res) and min(res) < 500:
+ abc('r %s.aig'%name)
+ abc('w %s_leaf.aig'%name)
+ return
+ else: #recur
+ r_part('%s_p0'%name)
+ r_part('%s_p1'%name)
+ return
+
+def two_eq_part():
+ abc('&get;&popart')
+ part = eq_classes()
+ if len(part) == 1:
+ print 'Partition has only one part'
+ return False
+ abc('w %s_save.aig'%f_name)
+ nn = n_pos()
+ p1=p0 = []
+ init = True
+ for i in range(len(part)): #union the first half of the parts together
+ if init == True:
+ p0=p0 + part[i]
+ if len(p0)>nn/2:
+ init = False
+ else:
+ p1 = p1 + part[i]
+ p0.sort()
+ p1.sort()
+ abc('&get')
+ remove(p1,1)
+ n0=n_pos()
+## print 'writing %s_p0.aig'%f_name
+ abc('w %s_p0.aig'%f_name)
+ abc('r %s_save.aig'%f_name)
+ remove(p0,1)
+## print 'writing %s_p1.aig'%f_name
+ n1=n_pos()
+ abc('w %s_p1.aig'%f_name)
+ return [n0,n1]
+
+def merge_parts(p,n):
+ parts = []
+ end = []
+ for i in range(len(p)):
+ if len(p[i]) > n:
+ parts = parts + [p[i]]
+ else:
+ end =end + p[i]
+ parts = parts + [end]
+ return parts
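+# Illustrative example (not part of the original script):
+#   merge_parts([[0,1,2],[3],[4,5,6,7],[8]],2)  ->  [[0,1,2],[4,5,6,7],[3,8]]
+# (parts of size > 2 are kept; the small ones are lumped into one final part)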
+
+
+def extract_parts(S=11):
+ abc('&get;&popart -S %d'%S)
+ part = eq_classes()
+ if len(part) == 1:
+ print 'Partition has only one part'
+ return 1
+ parts = merge_parts(part,2)
+ lp=len(parts)
+ print 'Found %d parts'%lp
+ abc('w %s_save.aig'%f_name)
+ for i in range(lp):
+ abc('r %s_save.aig'%f_name)
+ p=[]
+ for j in range(lp):
+ if i == j:
+ continue
+ else:
+ p = p + parts[j]
+ remove(p,1)
+ abc('&get;&scl;&lcorr;&put')
+ abc('w %s_part%d.aig'%(f_name,i))
+ return len(parts)
+
+def two_part():
+ abc('&get;&popart')
+ part = eq_classes()
+ if len(part) == 1:
+ print 'Partition has only one part'
+ return False
+ part1 = part[1:] #all but the 0th
+ p1=[]
+ for i in range(len(part1)): #union together
+ p1=p1 + part1[i]
+ p1.sort()
+ abc('w %s_p.aig'%f_name)
+ remove(p1,1)
+## print 'writing %s_p0.aig'%f_name
+ abc('w %s_p0.aig'%f_name)
+ n0=n_pos()
+ abc('r %s_p.aig'%f_name)
+ p0 = part[0]
+ p0.sort()
+ remove(p0,1)
+## print 'writing %s_p1.aig'%f_name
+ n1=n_pos()
+ abc('w %s_p1.aig'%f_name)
+ return [n0,n1]
+
+def set_t_gap(t1,t2):
+ nam = max(30000,n_ands())
+ ratio = 1+float(nam-30000)/float(70000)
+ gp = .5*ratio*t2
+ gp = min(100,gp)
+ t = min(100,ratio*t1)
+ return (t,gp)
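+# Illustrative example (not part of the original script): with n_ands() = 100000
+# the ratio is 2.0, so set_t_gap(50,60) returns (100,60), both capped at 100 sec.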
+
+def par_multi_sat(t=10,gap=0,m=1,H=0):
+ """ m = 1 means multiple of 1000 to increment offset"""
+ global last_gap
+ abc('w %s_save.aig'%f_name)
+ if not t == 0:
+ if gap == 0:
+ gap = max(.2,.2*t)
+ gap = max(15,gap)
+ if gap > t:
+ t=gap
+ t,gt = set_t_gap(t,gap)
+ gt = max(15,gt)
+ if gt <= last_gap:
+ gt = 1.2*last_gap
+ else:
+ t = gt = 5
+ if gt > t:
+ t = gt
+ last_gap = gt
+## H = max(100, t/n_pos()+1)
+ if not H == 0:
+ H = (gt*1000)/n_pos()
+ H = max(min(H,1000*gt),100)
+ tme = time.time()
+ list0 = listr_0_pos() #reduces POs
+ list0.sort()
+## print 'list0 = %s'%str(list0)
+ if len(list0)>0:
+ print 'temporarily removed %d const-0 POs'%len(list0)
+ ps()
+ if len(list0)> 0:
+ print 'Found %d const-0 POs, but not removed'%len(list0)
+## print ll
+ print 'par_multi_sat entered for %.2f sec. and gap = %.2f sec., H = %.2f'%(t,gt,H)
+ base = m*1000
+ if not m == 1:
+ offset = (m-1)*32000
+ abc('&get;&cycle -F %d;&put'%offset)
+ mx = 1000000000/max(1,n_latches())
+ N = n_pos()
+ na = n_ands()
+ F = [eval('(pyabc_split.defer(bmc3az)(t,gt,%d,H))'%(0*base))]
+## if na < 50000:
+ F = F + [eval('(pyabc_split.defer(pdraz)(t,gt,H))')] #need pdr in??
+ F = F + [eval('(pyabc_split.defer(sim3az)(t,gt,%d,4,0))'%(0*base))]
+ F = F + [eval('(pyabc_split.defer(sleep)(t))')]
+ F = F + [eval('(pyabc_split.defer(sim3az)(t,gt,%d,4,0))'%(100))]
+ F = F + [eval('(pyabc_split.defer(bmc3az)(t,gt,%d,0))'%(100))]
+ if mx > 1*base:
+ F = F + [eval('(pyabc_split.defer(sim3az)(t,gt,%d,1,97))'%(1*base))]
+ F = F + [eval('(pyabc_split.defer(bmc3az)(t,gt,%d,0))'%(1*base))]
+## if mx > 2*base:
+## F = F + [eval('(pyabc_split.defer(sim3az)(t,gt,%d))'%(2*base))]
+## F = F + [eval('(pyabc_split.defer(bmc3az)(t,gt,%d,0))'%(2*base))]
+ if mx > 4*base and na < 400000:
+ F = F + [eval('(pyabc_split.defer(sim3az)(t,gt,%d,4,23))'%(4*base))]
+ F = F + [eval('(pyabc_split.defer(bmc3az)(t,gt,%d,0))'%(4*base))]
+## if mx > 8*base and na < 300000:
+## F = F + [eval('(pyabc_split.defer(sim3az)(t,gt,%d,3,53))'%(8*base))]
+## F = F + [eval('(pyabc_split.defer(bmc3az)(t,gt,%d,0))'%(8*base))]
+## if mx > 16*base and na < 200000 :
+## F = F + [eval('(pyabc_split.defer(sim3az)(t,gt,%d,2,79))'%(16*base))]
+## F = F + [eval('(pyabc_split.defer(bmc3az)(t,gt,%d,0))'%(16*base))]
+## if mx > 32*base and na < 100000:
+## F = F + [eval('(pyabc_split.defer(sim3az)(t,gt,%d,1,97))'%(32*base))]
+## F = F + [eval('(pyabc_split.defer(bmc3az)(t,gt,%d,0))'%(32*base))]
+ ss=LL=L = []
+ S = 'UNDECIDED'
+ zero_done = two_done = False
+ s=ss = [-1]*n_pos()
+ ii = []
+ nn = len(F)
+ for i,res in pyabc_split.abc_split_all(F):
+ ii = ii + [i]
+ if len(ii) == len(F)-1: #all done but sleep
+ break
+ if i == 3: #sleep timeout
+ print 'sleep timeout'
+ break
+## if i == 1:
+## print 'PDR produced: %s'%str(res)
+#### print i
+ if i == 0:
+ zero_done = True # bmc with start at 0 is done
+ if i == 2:
+ two_done = True
+ if res == None: #this can happen if one of the methods bombs out
+ print 'Method %d returned None'%i
+ continue
+## print res
+ s1 = switch(res[1]) #res[1]= s
+ s = merge_s(list(s),s1)
+ print sumsize(s)
+ ss = ss + [s1]
+## LL = LL + [res[0]]
+## L = L + res[0]
+## L = [x for x in set(L)] #uniquefy
+ if count_less(s,0) == 0:
+ S = 'UNSAT'
+ break
+ # if i == 1 and is_unsat() and na < 50000: #pdr can return unsat.
+## if i == 1 and is_unsat(): #pdr can return unsat.
+## print 'Method pdr proved remaining POs UNSAT'
+## S = 'UNSAT'
+## L = res[0]
+## break
+## if not -1 in s:
+## S = 'UNSAT'
+## break
+ if len(ss)>1 and zero_done and two_done:
+ ss2 = ss[-2:] #checking if last 2 results agree
+ r = ss2[0]
+ if r == ss2[1] and count_less(r,1) < len(r): #at least 1 SAT PO found
+ break
+## if len(LL) > 1 and zero_done and two_done:
+## ll2 = LL[-2:] #checking if last 2 results agree
+## if ll2[0] == ll2[1] and ll2[0] > 0:
+## break
+ print 'Found %d SAT POs in '%(s.count(1)),
+ print 'time = %.2f'%(time.time()-tme)
+ print sumsize(s)
+## L.sort()
+## print 'L_before = %s'%(str(L))
+#### check_None_status(L,s,1) #now 1 in s means sat. s can have 0 in it, meaning it found some POs unsat.
+## L = merge(list(list0),list(L),1) #shift L according to list0 but do not include list0.
+## print 'L_shifted = %s'%(str(L))
+## # Need to return only SAT POs have to do the same for s
+ print 'len(s) = %d, len(list0) = %d'%(len(s),len(list0))
+ ss = expand(s,list0,0)
+## assert list0 == indices(ss,0)
+ print sumsize(ss)
+## assert check_consistancy(L,ss), 'inconsistant'
+ abc('r %s_save.aig'%f_name)
+ return S,[],ss
+
+
+def check_consistancy(L,s):
+ """ L is list of SAT's found. s is index of all"""
+ consistant = True
+ print 'checking s[L]'
+ for j in L: #make sure that s[L] = 1
+## print j,
+## print s[j]
+ if not s[j] == 1:
+ print j,
+ consistant = False
+ print 'checking s=1 => L'
+ for j in range(len(s)): #make sure that there are no other 1's
+ if s[j] == 1:
+ if not j in L:
+ print j,
+ consistant = False
+ return consistant
+
+
+def check_s(s1,s2):
+ assert len(s1) == len(s2),'lengths do not match'
+ miss = []
+ for i in range(len(s1)):
+ if (s1[i] == 0 and s2[i] == 1) or (s1[i] == 1 and s2[i] == 0):
+ miss = miss + [i]
+ print miss
+
+
+def merge_s(s1,s2):
+ assert len(s1) == len(s2), 'error in lengths, s1 = %s, s2 = %s'%(str(s1),str(s2))
+ s = [-1]*len(s1)
+ for i in range(len(s1)):
+ if not s1[i] == s2[i]:
+ if s1[i] == -1 or s2[i] == -1:
+ s[i] = max(s1[i],s2[i])
+ else:
+ print 'error: conflict in values at i = %d'%i
+ print 's1[i]=%d,s2[i]=%d'%(s1[i],s2[i])
+ else: #put in common value
+ s[i] = s1[i]
+ return s
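+# Illustrative example (not part of the original script):
+#   merge_s([-1,1,0,-1],[1,-1,0,-1])  ->  [1,1,0,-1]
+# (a 0/1 conflict between the two vectors would be reported and left as -1)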
+
+def switch(ss):
+ """ This changes the convention of SAT and UNSAT to SAT = 1, UNSAT = 0"""
+ s1 = ss
+ for i in range(len(ss)):
+ si = ss[i]
+ if si == 0:
+ s1[i] = 1
+ elif si == 1:
+ s1[i] = 0
+ return s1
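+# Illustrative example (not part of the original script):
+#   switch([0,1,-1])  ->  [1,0,-1]   (note: the list is modified in place)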
+
+
+def pdr_ss_r(t):
+ """
+ assumes that 0 POs have been removed
+ finds a set cexs in t seconds. Returns list of SAT POs found
+ """
+ global cex_list
+ x = time.time()
+ abc('pdr -az -T %f'%(t))
+ if is_sat():
+ print 'entering cex get vector'
+ cex_list = cex_get_vector() #does this get returned from a concurrent process?
+## n = count_non_None(cex_list)
+ print len(cex_list)
+ L = list_non_None(cex_list)
+ n = len(L)
+ print '%d cexs found in %0.2f sec.'%(n,(time.time()-x))
+ if n == len(cex_list):
+ print 'all remaining POs are SAT'
+## return L
+ else:
+ remove_disproved_pos(cex_list) #note that this will not remove all POs
+ else:
+ L = []
+ print 'T = %0.2f'%(time.time()-x)
+ return L
+
+def bmc_ss_r(t):
+ """
+ assumes that 0 POs have been removed
+ finds a set cexs in t seconds. Returns list of SAT POs found
+ """
+ global cex_list
+ x = time.time()
+ abc('bmc3 -az -C 1000000 -T %f'%(t))
+ if is_sat():
+ print 'entering cex get vector'
+ cex_list = cex_get_vector() #does this get returned from a concurrent process?
+## n = count_non_None(cex_list)
+ L = list_non_None(cex_list)
+ n= len(L)
+ print '%d cexs found in %0.2f sec.'%(n,(time.time()-x))
+ if n == len(cex_list):
+ print 'all remaining POs are SAT'
+## return L
+ else:
+ remove_disproved_pos(cex_list) #note that this will not remove all POs
+ else:
+ L = []
+ print 'T = %0.2f'%(time.time()-x)
+ return L
+
+def sim_ss_r(t):
+ """
+ assumes that 0 POs have been removed
+ finds a set cexs in t seconds. Returns list of SAT POs found
+ """
+ global cex_list
+ x = time.time()
+ run_command('sim3 -az -T %f'%(t))
+ if is_sat():
+ print 'entering cex get vector'
+ cex_list = cex_get_vector() #does this get returned from a concurrent process?
+## n = count_non_None(cex_list)
+ L = list_non_None(cex_list)
+ n = len(L)
+ print '%d cexs found in %0.2f sec.'%(n,(time.time()-x))
+ if n == len(cex_list):
+ print 'all remaining POs are SAT'
+## return L
+ else:
+ remove_disproved_pos(cex_list) #note that this will not remove all POs
+ else:
+ L = []
+ print 'T = %0.2f'%(time.time()-x)
+ return L
+
+def check_None_status(L,s=[],v=0):
+ """ L is the PO numbers that had non_None in
+ 0 means sat and 1 means unsat is
+ v tells which value means sat"""
+ if s == []:
+ s = status_get_vector()
+ error = False
+ for j in L:
+ if s[j] == v:
+ continue
+ else:
+ error = True
+ for i in range(len(s)):
+ if s[i] == v:
+ if i in L:
+ continue
+ else:
+ error = True
+ if error:
+ print 'status and non_None do not agree'
+ print 'L = %s'%str(L)
+ print 'SAT and UNSAT counts switched'
+ print sumsize(s)
+
def list_non_None(lst):
""" return [i for i,s in enumerate(cex_list) if not s == None]"""
@@ -3850,15 +6324,23 @@ def remove_disproved_pos(lst):
for i in range(len(lst)):
if not lst[i] == None:
abc('zeropo -N %d'%i)
- l=remove_const_pos()
+ l=remove_const_pos(0)
+
+def remove_proved_pos(lst):
+ for i in range(len(lst)):
+ if lst[i] > -1:
+ abc('zeropo -N %d'%i)
+ remove_const_pos(0)
+
+
-def bmc_j(t):
+def bmc_j(t=900):
""" finds a cex in t seconds starting at 2*N where N is depth of bmc -T 1"""
x = time.time()
tt = min(5,max(1,.05*t))
abc('bmc3 -T %0.2f'%tt)
if is_sat():
- print 'cex found in %0.2f sec at frame %d'%((time.time()-x),cex_frame())
+## print 'cex found in %0.2f sec at frame %d'%((time.time()-x),cex_frame())
return get_status()
## abc('bmc3 -T 1')
N = n_bmc_frames()
@@ -3868,29 +6350,34 @@ def bmc_j(t):
cmd = 'bmc3 -J 2 -D 4000 -C 1000000 -T %f -S %d'%(t,2*N)
## print cmd
abc(cmd)
- if is_sat():
- print 'cex found in %0.2f sec at frame %d'%((time.time()-x),cex_frame())
- return get_status()
+## if is_sat():
+## print 'cex found in %0.2f sec at frame %d'%((time.time()-x),cex_frame())
+ return RESULT[get_status()]
-def pdrseed(t):
+def pdrseed(t=900):
"""uses the abstracted version now"""
## abc('&get;,treb -rlim=60 -coi=3 -te=1 -vt=%f -seed=521'%t)
abc('&get;,treb -rlim=100 -vt=%f -seed=521'%t)
-
+ return RESULT[get_status()]
def pdrold(t):
abc('&get; ,pdr -vt=%f'%t)
+ return RESULT[get_status()]
-def pdr(t):
+def pdr(t=900):
abc('&get; ,treb -vt=%f'%t)
return RESULT[get_status()]
-def pdra(t):
+def pdr0(t=900):
+ abc('&get; ,pdr -rlim=100 -vt=%f'%t)
+ return RESULT[get_status()]
+
+def pdra(t=900):
## abc('&get; ,treb -rlim=100 -ssize -pre-cubes=3 -vt=%f'%t)
- abc('&get; ,treb -abs -rlim=100 -gen-cex -vt=%f'%t)
+ abc('&get; ,treb -abs -rlim=100 -sat=abc -vt=%f'%t)
return RESULT[get_status()]
-def pdrm(t):
+def pdrm(t=900):
abc('pdr -C 0 -T %f'%t)
return RESULT[get_status()]
@@ -3898,10 +6385,77 @@ def pdrmm(t):
abc('pdr -C 0 -M 298 -T %f'%t)
return RESULT[get_status()]
+def bmc2(t):
+ abc('bmc2 -C 1000000 -T %f'%t)
+ return RESULT[get_status()]
+
+def bmc(t=900):
+ abc('&get; ,bmc -vt=%d'%t)
+ return RESULT[get_status()]
+
+def intrp(t=900):
+ abc('&get; ,imc -vt=%d'%t)
+ return RESULT[get_status()]
+
+def bmc3az(t=900,gt=10,C=999,H=0):
+ t_init = time.time()
+ if C > 999:
+ abc('&get; &cycle -F %d; &put'%C)
+ abc('bmc3 -az -C 1000000 -T %d -G %d -H %.2f'%(t,gt,H))
+ cex_list = cex_get_vector()
+ L = list_non_None(cex_list)
+## check_None_status(L)
+ print 'bmc3az(%.2f,%.2f,%d,%d): CEXs = %d, time = %.2f'%(t,gt,C,H,len(L),(time.time()-t_init))
+## s = status_get_vector()
+ s = [-1]*n_pos()
+ for j in L:
+ s[j]=0 #0 here means SAT. It will be switched in par_multi_sat
+ return L,s
+
+def pdraz(t=900,gt=10,H=0):
+ print 'pdraz entered with t = %.2f, gt = %.2f, H = %.2f'%(t,gt,H)
+ t_init = time.time()
+ run_command('pdr -az -T %d -G %d -H %.2f'%(t,gt,H))
+ cex_list = cex_get_vector()
+ L = list_non_None(cex_list)
+## check_None_status(L)
+ s = status_get_vector()
+ if s == None:
+ print "status_get_vector returned None"
+ else:
+ print 'Number of UNSAT POs = %d'%(len(s) - count_less(s,1))
+ print 'pdraz(%.2f,%.2f,%d): CEXs = %d, time = %.2f'%(t,gt,H,len(L),(time.time()-t_init))
+ return L,s
+
+def sim3az(t=900,gt=10,C=1000,W=5,N=0):
+ """ N = random seed, gt is gap time, W = # words, F = #frames"""
+ t_init = time.time()
+ if C > 1000:
+ abc('&get; &cycle -F %d; &put'%C)
+ abc('sim3 -az -T %.2f -G %.2f -F 40 -W %d -N %d'%(t,gt,W,N))
+ cex_list = cex_get_vector()
+ L = list_non_None(cex_list)
+## check_None_status(L)
+ s = [-1]*n_pos()
+ for i in L:
+ s[i] = 0 #0 indicates SAT here
+ print 'sim3az(%.2f,%.2f,%d,%d,%d): CEXs=%d, time = %.2f'%(t,gt,C,W,N,len(L),(time.time()-t_init))
+ return L,s
+
+def bmc3(t=900):
+ abc('bmc3 -C 1000000 -T %d'%t)
+ return RESULT[get_status()]
+
+def intrpm(t=900):
+ abc('int -C 1000000 -F 10000 -K 1 -T %d'%t)
+ print 'intrpm: status = %d'%get_status()
+ return RESULT[get_status()]
+
def split(n):
+ global aigs
abc('orpos;&get')
abc('&posplit -v -N %d;&put;dc2'%n)
- trim()
+ res = a_trim()
def keep_splitting():
for j in range(5):
@@ -3916,55 +6470,14 @@ def keep_splitting():
def drill(n):
run_command('&get; &reachm -vcs -H 5 -S %d -T 50 -C 40'%n)
-def prove_1(ratio=.75):
- """
- A version of prove called from prove_pos, prove_g_pos, prove_only, prove_g_pos_split when we
- have speculated and produced multiple outputs.
- Proves all the outputs together. If ever an abstraction was done then
- if SAT is returned,we make RESULT return "undecided".
- """
- global x_factor,xfi,f_name,x, initial_f_name
- x = time.time()
- max_bmc = -1
- print 'Initial: ',
- ps()
- x_factor = xfi
- initial_f_name_save = initial_f_name #needed because we are making local backups here.
- initial_f_name = '%s_temp'%initial_f_name
- set_globals()
- print'\n***Running abstract'
- status = abstract(ifbip)
- trim()
- status = process_status(status)
- if ((status <= Unsat) or status == Error):
- if status < Unsat:
- print 'CEX in frame %d'%cex_frame(),
- print 'abstract found a cex in initial circuit'
- print 'Time for proof = %f sec.'%(time.time() - x)
- initial_f_name = initial_f_name_save
- return RESULT[status]
- print 'Time for proof = %f sec.'%(time.time() - x)
- initial_f_name = initial_f_name_save
- return RESULT[status]
- #undecided here
- print 'Entering direct verificationb'
-#### status = final_verify_recur(2)
- status = BMC_VER()
- return status
-
- trim()
-#### write_file('final')
- print 'Time for proof = %f sec.'%(time.time() - x)
- initial_f_name = initial_f_name_save
- return RESULT[status]
-
+
def pre_reduce():
x = time.clock()
pre_simp()
write_file('smp')
abstract(ifbip)
#### write_file('abs')
- print 'Time = %f'%(time.clock() - x)
+ print 'Time = %0.2f'%(time.clock() - x)
def sublist(L,I):
# return [s for i,s in enumerate(L) if i in I]
@@ -3984,7 +6497,6 @@ It has been set up so that each of the functions works on the current aig and
possibly transforms it. The new aig and status is always read into the master when done
"""
-
def tf():
result = top_fork()
return result
@@ -4000,31 +6512,39 @@ def top_fork(J,t):
def run_sp2_par(t):
""" Runs the single method simple, timed for t seconds."""
- global cex_list,methods
- J = slps+[6]
- print sublist(methods,J)
+ global cex_list,methods, pord_trace
+ J = slps+[6] #6 is the 'simple' method
+## mtds = sublist(methods,J)
+## print mtds,
+ print 'time = %0.2f'%t
funcs = create_funcs(J,t)
y = time.time()
for i,res in pyabc_split.abc_split_all(funcs):
## print 'i,res = %d,%s'%(i,res)
- t = time.time()-y
+ T = time.time()-y
if i == 0:
- print 'sleep timer expired in %0.2f'%t
+ print 'Timer expired in %0.2f'%T
return 'UNDECIDED'
else:
## print i,res
- if res == 'UNSAT':
- print 'Simple_prove proved UNSAT in %0.2f sec.'%t
+ #note simple returns a vector
+ mtd = res[1]
+ ress = res[0]
+ if ress == 'UNSAT':
+ print 'simple proved UNSAT in %0.2f sec.'%T
+ add_pord('UNSAT by %s'%mtd)
return 'UNSAT'
- elif res == 'UNDECIDED':
- print 'Simple_prove proved UNDECIDED in %0.2f sec.'%t
+ elif ress == 'UNDECIDED':
+ print 'simple returned UNDECIDED in %0.2f sec.'%T
return 'UNDECIDED'
- else:
- print 'Simple_prove found cex in %0.2f sec.'%t
+ if ress == 'SAT':
+ print 'simple found cex in %0.2f sec.'%T
+ add_pord('SAT by %s'%mtd)
return 'SAT'
-
+ else:
+ assert False, 'ress = %s'%ress
-def run_parallel(J,t,BREAK):
+def run_parallel(J,t,BREAK='US'):
""" Runs the listed methods J, each for time = t, in parallel and
breaks according to BREAK = subset of '?USLB'"""
global cex_list, methods
@@ -4086,16 +6606,19 @@ def fork_break(funcs,mtds,BREAK):
M = mtds[i]
last_winner = M
if M == 'sleep':
- print 'sleep: time expired in %s sec.'%convert(t)
- assert status >= Unsat,'status = %d'%status
+ print 'sleep: time expired in %0.2f sec.'%(t)
+## return 0,[Undecided]+[M]
+## assert status >= Unsat,'status = %d'%status
break
if ((status > Unsat) and '?' in BREAK): #undecided
- break
- elif status == Unsat: #unsat
- print '%s: UNSAT in %s sec.'%(M,convert(t))
+ break
+ elif status == Unsat or res == 'UNSAT': #unsat
+ print '%s: UNSAT in %0.2f sec.'%(M,(t))
+ status = Unsat
if 'U' in BREAK:
break
- elif status < Unsat: #status == 0 - cex found
+ elif status < Unsat or res == 'SAT': #status == 0 - cex found
+ status = Sat
if M in methods:
if methods.index(M) in exbmcs+allreachs+allpdrs+[1]: #set the known best depth so far. [1] is interp
set_max_bmc(n_bmc_frames())
@@ -4108,43 +6631,58 @@ def fork_break(funcs,mtds,BREAK):
break
else:
continue
- return i,status
+ add_trace('%s by %s'%(RESULT[status],M))
+ return i,[status]+[M]
def fork_best(funcs,mts):
""" fork the functions, If not solved, take the best result in terms of AIG size"""
global f_name
n = len(mts)-1
+ r = range(len(mts))
y = time.time()
m_best = -1
best_size = [n_pis(),n_latches(),n_ands()]
## print best_size
abc('w %s_best_aig.aig'%f_name)
for i,res in pyabc_split.abc_split_all(funcs):
+ if mts[i] == 'sleep':
+ m_best = i
+ break
+ r = delete(r,i)
+ if len(r) == 1:
+ if mts[r[0]] == 'sleep':
+ break
status = prob_status()
-## print i,
-## ps()
-## print i,res,
- #ps()
if not status == -1: #solved here
m = i
t = time.time()-y
if status == 1: #unsat
- print '%s proved UNSAT in %f sec.'%(mtds[i],t)
+ print '%s proved UNSAT in %f sec.'%(mts[i],t)
else:
- print '%s found cex in %f sec. - '%(mtds[i],t),
+ print '%s found cex in %f sec. - '%(mts[i],t),
break
else:
cost = rel_cost(best_size)
-## print i,cost
if cost < 0:
best_size = [n_pis(),n_latches(),n_ands()]
-## print best_size
m_best = i
-## print m_best
abc('w %s_best_aig.aig'%f_name)
abc('r %s_best_aig.aig'%f_name)
+ add_trace('%s'%mts[m_best])
return m_best,res
+def delete(r,i):
+ """ remove element in the list r that corresponds to i """
+ ii = r.index(i)
+ z = []
+ for i in range(len(r)):
+ if i == ii:
+ continue
+ else:
+ z = z + [r[i]]
+ return z
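+# Illustrative example (not part of the original script):
+#   delete([5,7,9],7)  ->  [5,9]   (drops the entry whose value equals i)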
+
+
def take_best(funcs,mts):
""" fork the functions, If not solved, take the best result in terms of AIG size"""
global f_name
@@ -4161,39 +6699,58 @@ def take_best(funcs,mts):
abc('r %s_best_aig.aig'%f_name)
return m_best,res
-
def fork_last(funcs,mtds):
""" fork the functions, and take first definitive answer, but
if last method ends first, then kill others"""
+ global m_trace,hist,sec_options
n = len(mtds)-1
m = -1
y = time.time()
- lst = ''
- print mtds
+ sres =lst = ''
+## print mtds
#print 'starting fork_last'
for i,res in pyabc_split.abc_split_all(funcs):
- #print i,res
+## print i,res
status = prob_status()
- if not status == -1: #solved here
+ if mtds[i] == 'par_scorr' and n_ands() == 0:
+ add_trace('UNSAT by %s'%res)
+ return i,Unsat
+ if not status == -1 or res in ['SAT','UNSAT']: #solved here
m = i
t = int(time.time()-y)
- if status == 1: #unsat
+ if status == 1 or res == 'UNSAT': #unsat
+ sres = str(res)
+ res = Unsat
print '%s proved UNSAT in %d sec.'%(mtds[i],t)
else:
- print '%s found cex in %s sec. - '%(mtds[i],convert(t)),
+ res = Sat
+ print '%s found cex in %0.2f sec. - '%(mtds[i],(t)),
break
elif i == n:
+## print res
+ if mtds[i] == 'pre_simp':
+ m_trace = m_trace + [res[1]]
+ hist = res[2]
t = int(time.time()-y)
m = i
- print '%s: %d sec.'%(mtds[i],t)
- ps()
- break
+ if mtds[i] == 'initial_speculate':
+ return m,res
+ else:
+ print '%s: UNDECIDED in %d sec.'%(mtds[i],t)
+ res = Undecided
+ ps()
+ break
elif mtds[i] == 'sleep':
res = Undecided
t = time.time()-y
- print 'sleep timer expired in %0.2f'%t
+ print 'Timer expired in %0.2f'%t
break
lst = lst + ', '+mtds[i]
+## sres = str(res)
+ if sres[:5] == 'scorr':
+ add_trace('UNSAT by %s'%sres)
+ return m,Unsat
+ add_trace('%s by %s'%(RESULT[res],mtds[i]))
return m,res
def fork(funcs,mtds):
@@ -4208,10 +6765,10 @@ def fork(funcs,mtds):
## if t > .15*last_verify_time: ##### temp
t = last_verify_time = last_verify_time + .1*t
#print 'verify time increased to %s'%convert(t)
- assert res == get_status(),'res: %d, status: %d'%(res,get_status())
+ assert res[0] == get_status(),'res: %d, status: %d'%(res,get_status())
+## add_trace('%s by %s'%(RESULT[res[0]],mtds[i]))
return i,res
-
def save_time(M,t):
global win_list,methods
j = methods.index(M)
@@ -4529,9 +7086,31 @@ def speed_tradeoff(k=4):
abc('r %s_bestsp.blif'%f_name)
return
+def area_tradeoff(k=4):
+ print nl(),
+ best = n_nodes()
+ abc('write_blif %s_bestsp.blif'%f_name)
+ L_init = n_levels()
+ while True:
+ L_old = n_levels()
+ L = L_old +1
+ n_nodes_old = n_nodes()
+ abc('speedup;if -a -D %d -F 2 -K %d -C 11'%(L,k))
+ if n_nodes() < best:
+ best = n_nodes()
+ abc('write_blif %s_bestsp.blif'%f_name)
+## if n_levels() == L_old:
+ if n_nodes() == n_nodes_old:
+ break
+ print nl(),
+ continue
+ abc('r %s_bestsp.blif'%f_name) #restore the best mapping written above
+ return
+
+
def map_lut_dch(k=4):
'''minimizes area '''
- abc('st; dch; if -a -F 2 -K %d -C 11; mfs -a -L 50 ; lutpack -L 50'%k)
+ abc('st; dch; if -a -F 2 -K %d -C 11; mfs2 -a -L 50 ; lutpack -L 50'%k)
def map_lut_dch_iter(k=8):
## print 'entering map_lut_dch_iter with k = %d'%k
@@ -4582,6 +7161,37 @@ def dmitri_iter(k=8):
## run_command('cec -n %s.aig'%f_name)
print nl()
+def shrink():
+ tm = time.time()
+ best = n_ands()
+ while True:
+ abc('&get;&if -K 4 -F 1 -A 0 -a;&shrink;&put')
+ print n_ands(),
+ if n_ands()< .99*best:
+ best = n_ands()
+ continue
+ break
+ print 't = %.2f, '%(time.time()-tm),
+ ps()
+
+def shrink_lut():
+ tm = time.time()
+ abc('&get;&if -K 4 -F 1 -A 0 -a;&put')
+ best = n_nodes()
+ print best,
+ abc('&shrink')
+ while True:
+ abc('&if -K 4 -F 1 -A 0 -a;&put')
+ print n_nodes(),
+ if n_nodes() < .99*best:
+ best = n_nodes()
+ abc('&shrink')
+ continue
+ break
+ abc('&put')
+ print 'time = %.2f, '%(time.time()-tm),
+ ps()
+
def map_lut(k=4):
'''minimizes edge count'''
@@ -4597,6 +7207,7 @@ def nl():
def dc2_iter(th=.999):
abc('st')
+ tm = time.time()
while True:
na=n_ands()
abc('dc2')
@@ -4604,6 +7215,22 @@ def dc2_iter(th=.999):
## print nl(),
if n_ands() > th*na:
break
+ print 't = %.2f, '%(time.time()-tm),
+ ps()
+## print n_ands()
+
+def drw_iter(th=.999):
+ abc('st')
+ tm = time.time()
+ while True:
+ na=n_ands()
+ abc('drw')
+ print n_ands(),
+## print nl(),
+ if n_ands() > th*na:
+ break
+ print 't = %.2f, '%(time.time()-tm),
+ ps()
## print n_ands()
def adc2_iter(th=.999):
@@ -4731,7 +7358,7 @@ def snap_bestk(k):
def cec_it():
""" done because &r changes the names. Can't use -n because rfraig_store reorders pis and pos."""
abc('write_blif %s_temp.blif'%f_name)
- abc('&r %s.aig;&put'%f_name)
+ abc('&r -s %s.aig;&put'%f_name)
run_command('cec %s_temp.blif'%f_name)
abc('r %s_temp.blif'%f_name)
@@ -4972,16 +7599,14 @@ def check_frames():
abc('read_status vta.status')
return n_bmc_frames()
-def gate_abs(t):
+def vta_abs(t):
""" Do gate-level abstraction for F frames """
r = 100 *(1 - abs_ratio)
- run_command('orpos; &get;&vta -dv -A %s_vabs.aig -P 2 -T %d -R %d; &vta_gla;&gla_derive; &put'%(f_name,t,r))
+## abc('orpos; &get;&vta -dv -A %s_vabs.aig -P 2 -T %d -R %d; &vta_gla;&w %s_gla.aig;&gla_derive; &put; w %s_gabs.aig'%(f_name,t,r,f_name,f_name))
+ abc('orpos; &get;&vta -dv -A %s_vabs.aig -P 2 -T %d -R %d; &vta_gla;&w %s_gla.aig'%(f_name,t,r,f_name))
+
## write_file('abs')
-
-def gla_abs(t):
- """ Do gate-level abstraction for F frames """
- r = 100 *(1 - abs_ratio)
- run_command('orpos; &get;&gla_cba -C 0 -T %d -F 0 -R %d; &gla_derive; &put'%(t,r))
+
def sizeof():
return [n_pis(),n_pos(),n_latches(),n_ands()]
@@ -4989,13 +7614,16 @@ def sizeof():
def abstract(ifb=2):
global abs_ratio
## print 'ifb = %d'%ifb
- if ifb == 0: #new way using gate_abs and no bip
+ if ifb == 0: #new way using vta_abs and no bip
+ add_trace('abstracta')
return abstracta(False)
elif ifb == 1: #old way using ,abs
assert ifb == ifbip, 'call to abstract has ifb not = global ifbip'
+ add_trace('abstractb')
return abstractb()
else:
#new way using ,abs -dwr -- (bip_abs)
+ add_trace('abstracta')
return abstracta(True)
def abstracta(if_bip=True):
@@ -5034,14 +7662,20 @@ def abstracta(if_bip=True):
mtds = ['bip_abs']
funcs = [eval('(pyabc_split.defer(bip_abs)(t))')]
else:
- print 'using gate_abs'
- mtds = ['gate_abs']
- funcs = [eval('(pyabc_split.defer(gate_abs)(t))')]
+ if gla:
+ print 'using gla_abs_iter for %0.2f sec.'%(t-2)
+ mtds = ['gla_abs_iter']
+ add_trace('gla_abs')
+ funcs = [eval('(pyabc_split.defer(gla_abs_iter)(t-2))')]
+ else:
+ print 'using vta_abs for %0.2f sec.'%(t-2)
+ mtds = ['vta_abs']
+ funcs = [eval('(pyabc_split.defer(vta_abs)(t-2))')]
funcs = funcs + [eval('(pyabc_split.defer(monitor_and_prove)())')]
- J = [34,30]
- if n_ands()> 500000: #if greater than this, bmc_j may take too much memory.
- J = [34]
-## J=[]
+## J = [34,30]
+ J = pdrs[:1]+bmcs[:1] #just use one pdr and one bmc here.
+## J = pdrs+bmcs
+## J = modify_methods(J,2)
funcs = funcs + create_funcs(J,1000)
mtds = mtds + ['monitor_and_prove'] + sublist(methods,J)
print 'methods = ',
@@ -5049,90 +7683,105 @@ def abstracta(if_bip=True):
vta_term_by_time=0
for i,res in pyabc_split.abc_split_all(funcs):
## print i,res
- if i == 0: #vta ended first
+ if i == 0: #vta or gla ended first
print 'time taken = %0.2f'%(time.time() - tt)
if is_sat():
- print 'vta abstraction found cex in frame %d'%cex_frame()
+ print 'vta/gla abstraction found cex in frame %d'%cex_frame()
+ add_trace('SAT by gla')
return Sat
if is_unsat():
- print 'vta abstraction proved UNSAT'
+ print 'vta/gla abstraction proved UNSAT'
+ add_trace('UNSAT by gla')
return Unsat
else: #undecided
+ if if_bip:
+ abc('&r -s %s_greg.aig; &abs_derive; &put; w %s_gabs.aig'%(f_name,f_name))
+ else:
+ abc('&r -s %s_gla.aig;&gla_derive; &put; w %s_gabs.aig'%(f_name,f_name))
if time.time() - tt < .95*t:
print 'abstraction terminated but not by timeout'
vta_term_by_time = 0
break
else:
- print 'abstraction terminated by a timeout of %d'%t
+ print 'abstraction terminated by a timeout of %0.2f'%t
## print 'final abstraction: ',
## ps()
vta_term_by_time=1
break
if i == 1: #monitor and prove ended first (sleep timed out)
+ print 'monitor_and_prove: '
## print i,res
- if res > Unsat: #we abandon abstraction
-## print 'final abstraction: ',
-## ps()
-## print 'Trying to verify final abstraction'
-## result = verify([7,9,19,23,24,30],100) #do this if if_bip==0
-## if result == Unsat:
-## print 'Abstraction proved valid'
-## return result
-## else:
-## print 'Abstract time wasted = %0.2f'%(time.time()-tt)
-## abc('r %s_before_abs.aig'%f_name)
-## result = Undecided_no_reduction
-## return result
-## elif res == Undecided_no_reduction:
- print 'monitor and prove timed out or little reduction'
+ if res == None:
+ print 'monitor and prove had an error'
+ continue
+ result = res[0]
+ if res[0] > Undecided: #we abandon abstraction
+ add_trace('de_abstract')
+ print 'monitor and prove timed out or too little reduction'
abc('r %s_before_abs.aig'%f_name)
return Undecided_no_reduction
+ if res[0] == Undecided:
+ break
else:
if not initial_size == sizeof(): #monitor and prove should not return SAT in this case
assert not is_sat(), 'monitor_and_prove returned SAT on abstraction!'
print 'time taken = %0.2f'%(time.time() - tt)
- if is_unsat():
+ if is_unsat() or res[0] == 'UNSAT' or res[0] == Unsat:
+ add_trace('UNSAT by %s'%res[1])
return Unsat
- elif is_sat():
+ elif is_sat() or res[0] < Unsat:
+ add_trace('SAT by %s'%res[1])
return Sat
else:
abc('r %s_before_abs.aig'%f_name)
return Undecided_no_reduction
else: #one of the engines got an answer
print 'time taken = %0.2f'%(time.time() - tt)
+## add_trace('initial %s'%mtds[i])
if is_unsat():
print 'Initial %s proved UNSAT'%mtds[i]
+ add_trace('UNSAT by initial %s'%mtds[i])
return Unsat
if is_sat():
print 'Initial %s proved SAT'%mtds[i]
+ add_trace('SAT by initial %s'%mtds[i])
return Sat
else: # an engine failed here
print 'Initial %s terminated without result'%mtds[i]
+ add_trace('method %s failed'%mtds[i])
## return Undecided
continue
- if vta_term_by_time == 0 and if_bip == 0: #vta timed out itself
+ if vta_term_by_time == 0 and if_bip == 0 and gabs: #vta timed out itself
print 'Trying to verify final abstraction',
ps()
result = verify([7,9,19,23,24,30],100)
- if result == Unsat:
+ if result[0] == Unsat:
+ add_trace('UNSAT by %s'%result[1])
print 'Abstraction proved valid'
- return result
+ return result[0]
# should do abstraction refinement here if if_bip==1
- if if_bip == 0:
- print 'abstraction no good - restoring initial simplified AIG'
+ if if_bip == 0 and gabs: # thus using vta or gla abstraction and no refinement
+ print 'abstraction no good - restoring initial simplified AIG',
abc('r %s_before_abs.aig'%f_name)
+ add_trace('de_abstract')
+ ps()
return Undecided_no_reduction
- else:
+ else: # thus using bip_abs (ifbip=1) or gate abstraction (ifbip=0&gabs=False) and refinement
if is_sat():
print 'Found true counterexample in frame %d'%cex_frame()
+ add_trace('SAT')
return Sat_true
if is_unsat():
+ add_trace('UNSAT')
return Unsat
## set_max_bmc(NBF)
NBF = bmc_depth()
print 'Abstraction good to %d frames'%max_bmc
#note when things are done in parallel, the &aig is not restored!!!
- abc('&r %s_greg.aig; &w initial_greg.aig; &abs_derive; &put; w initial_gabs.aig; w %s_gabs.aig'%(f_name,f_name))
+ if if_bip:
+ abc('&r -s %s_greg.aig; &w initial_greg.aig; &abs_derive; &put; w initial_gabs.aig; w %s_gabs.aig'%(f_name,f_name))
+ else:
+ run_command('&r -s %s_gla.aig; &w initial_gla.aig; &gla_derive; &put; w initial_gabs.aig; w %s_gabs.aig'%(f_name,f_name))
set_max_bmc(NBF)
print 'Initial abstraction: ',
ps()
@@ -5143,148 +7792,356 @@ def abstracta(if_bip=True):
## (latches_after >= .75*latches_before_abs)):
if small_abs(abs_ratio):
abc('r %s_before_abs.aig'%f_name)
- print "Little reduction!"
+ print "Too little reduction!"
print 'Abstract time wasted = %0.2f'%(time.time()-tt)
+ add_trace('de_abstract')
return Undecided_no_reduction
sims_old = sims
sims=sims[:1] #make it so that rarity sim is not used since it can't find a cex
- result = abstraction_refinement(latches_before_abs, NBF,abs_ratio)
- sims = sims_old
- if result <= Unsat:
- return result
- ## if n_latches() >= .90*latches_before_abs:
- ## if ((rel_cost_t([pis_before_abs, latches_before_abs, ands_before_abs])> -.1) or (latches_after >= .90*latches_before_abs)):
- ## if rel_cost_t([pis_before_abs,latches_before_abs, ands_before_abs])> -.1:
+## result = Undecided_no_reduction
+ print 'small_abs = %.2f, vta_term_by_time = %d'%(small_abs(abs_ratio),vta_term_by_time)
+ if not vta_term_by_time:
+ print 'Entering abstraction_refinement'
+ result = abstraction_refinement(latches_before_abs, NBF,abs_ratio)
+ sims = sims_old
+ if result <= Unsat:
+ return result
if small_abs(abs_ratio): #r is the ratio of final to initial latches in the abstraction. If greater, then True
+## if small_abs(abs_ratio) or result == Undecided_no_reduction or vta_term_by_time: #r is ratio of final to initial latches in absstraction. If greater then True
abc('r %s_before_abs.aig'%f_name) #restore original file before abstract.
- print "Little reduction! ",
+ print "Too little reduction! ",
print 'Abstract time wasted = %0.2f'%(time.time()-tt)
- result = Undecided_no_reduction
- return result
- #new
+ add_trace('de_abstract')
+ return Undecided_no_reduction
+ elif vta_term_by_time:
+ abc('r %s_gabs.aig'%f_name)
+ print 'Simplifying and testing abstraction'
+ reparam()
+ result = simplify()
+ assert result >= Unsat, 'simplify returned SAT'
+ if result > Unsat: #test if abstraction is unsat
+ result = simple()
+ res = result[0]
+ if res == 'UNSAT':
+ return Unsat
+ else:
+ abc('r %s_before_abs.aig'%f_name) #restore original file before abstract.
+ print "Timed out with bad abstraction",
+ print 'Abstract time wasted = %0.2f'%(time.time()-tt)
+ add_trace('de_abstract')
+ return Undecided_no_reduction
+## if res == 'SAT':
+#### result = Sat #this was an error
+## result = Undecided_no_reduction
+## elif res == 'UNSAT':
+## result = Unsat
+## else:
+## result = Undecided_no_reduction
+## return result
else:
write_file('abs') #this is only written if it was not solved and some change happened.
print 'Abstract time = %0.2f'%(time.time()-tt)
return result
+def gla_abs_iter(t):
+ """ this iterates &gla every x sec and checks if it should be stopped or continued.
+ Uses the fact that when &gla ends
+ it leaves the result in the &-space showing which elements are abstracted.
+ cex_abs_depth, time_abs_prev and time_abs come from monitor_and_prove.
+ gla_abs_iter and monitor_and_prove are run in parallel.
+ """
+ global cex_abs_depth, abs_depth, abs_depth_prev, time_abs_prev, time_abs
+ it_interval = 10000
+ total = t
+ tt = time.time()
+ run_command('orpos;&get')
+## run_command('&w %s_gla.aig'%f_name)
+ abs_depth = abs_depth_prev = 0
+## while True:
+ r = 100 *(1 - abs_ratio)
+ q = 99 #############TEMP
+## run_command('&r %s_gla.aig'%f_name)
+ time_remain = total - (time.time() - tt) #time remaining
+ it = min(it_interval,time_remain)
+## if it < 2:
+## break
+ #gla and vabs are the files with the abstraction info and gabs is the derived file.
+ cmd = '&gla -mvs -B 1 -A %s_vabs.aig -T %d -R %d -Q %d -S %d'%(f_name,it,r,q,abs_depth)
+ print 'Executing %s'%cmd
+ name = '%s_vabs.aig'%f_name
+ run_command(cmd)
+ if os.access(name,os.R_OK):
+ run_command('&r -s %s_vabs.aig'%f_name) #get the last abstraction result
+ run_command('&w %s_gla.aig'%f_name) #saves the result of abstraction.
+ else:
+ run_command('&r -s %s_abs_old.aig'%f_name) #get the last abstraction result
+ run_command('&w %s_gla.aig'%f_name) #saves the result of abstraction.
+ print 'wrote %s_gla file'%f_name
+ run_command('&gla_derive;&put')
+ run_command('w %s_gabs.aig'%f_name)
+## break
+## abs_depth_prev = abs_depth
+## abs_depth = n_bmc_frames()
+## print 'abs_depth = %d'%abs_depth
+## #test here if done
+## if (time.time()-tt) > total:
+## break
+## print 'reading abs_values'
+## read_abs_values()
+## print 'values read'
+## if abs_done(time_remain):
+## print 'abs_done'
+## break
+## else:
+## continue
+
+def read_abs_values():
+ """here we read in the abs values written by monitor and prove"""
+ global cex_abs_depth, abs_depth, abs_depth_prev, time_abs_prev, time_abs
+ if not os.access('%s_ab.txt'%f_name,os.R_OK):
+ print '%s_ab.txt does not exist'%f_name
+ return #file does not exist, so leave the values unchanged
+## print '%s_ab.txt file exists and is readable'%f_name
+ ab = open('%s_ab.txt'%f_name,'r')
+ print '%s_ab.txt is opened'%f_name
+ s = ab.readline()
+## print s
+ cex_abs_depth = int(s)
+ s = ab.readline()
+## print s
+ time_abs_prev = float(s)
+ s = ab.readline()
+## print s
+ time_abs = float(s)
+ s = ab.readline()
+## print s
+ abs_depth_prev = float(s)
+ s = ab.readline()
+## print s
+ abs_depth = float(s)
+ ab.close()
+## print 'read: ',
+## print cex_abs_depth,time_abs_prev,time_abs,abs_depth_prev,abs_depth
+## print 'it is closed'
+
+def write_abs_values():
+ """here we write out the abs values shared between gla_abs_iter and monitor_and_prove"""
+ global cex_abs_depth, abs_depth, abs_depth_prev, time_abs_prev, time_abs
+## print 'write: ',
+## print cex_abs_depth,time_abs_prev,time_abs,abs_depth_prev,abs_depth
+ ab = open('%s_ab.txt'%f_name,'w')
+ ab.write(str(cex_abs_depth)+'\n')
+ ab.write(str(time_abs_prev)+'\n')
+ ab.write(str(time_abs)+'\n')
+ ab.write(str(abs_depth_prev)+'\n')
+ ab.write(str(abs_depth))
+ ab.close()
+
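+# Layout of the shared %s_ab.txt file (one value per line), written by
+# write_abs_values() above and read back by read_abs_values():
+#   cex_abs_depth, time_abs_prev, time_abs, abs_depth_prev, abs_depth
+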
+def abs_done(time_remain):
+ """ heuristic to see if we are not making any progress and should quit
+ look at frame of last cex found (cex_abs_depth) for current abstraction using a parallel engine
+ look at depth of current abstraction (abs_depth) and last abstraction (abs_depth_prev)
+ look at time between new abstractions time_abs - time_abs_prev.
+ compute approximate frames_per_sec
+ if frames_to_next_cex > frames_per_sec * time_remain
+ then we won't get there in the time allowed.
+ We have to pass all the information along when we are doing things in parallel by writing a file
+ with this info in it and reading it in later. This is because monitor_and_prove
+ runs in parallel and global variables are not passed around.
+ """
+ global cex_abs_depth, abs_depth, abs_depth_prev, time_abs_prev, time_abs
+## print 'checking if abs has enough time to next cex'
+ frames_to_next_cex = cex_abs_depth - abs_depth
+ div = time_abs - time_abs_prev
+ div = max(.1,div)
+ frames_per_sec = (abs_depth - abs_depth_prev)/div
+ if frames_per_sec <= 0:
+ return False #something wrong
+ print 'frames_per_sec = %0.2f, frames_to_next_cex = %d, time remaining = %0.2f'%(frames_per_sec, frames_to_next_cex, time_remain)
+ if frames_to_next_cex > 0.2*(frames_per_sec * time_remain): #later frames will take longer so factor of 5 here
+ print 'not enough abs time to next cex'
+ return True
+ return False
+
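+# Illustrative numbers for abs_done() (not from a real run): if the abstraction
+# advanced from frame 20 to frame 30 in 50 sec, frames_per_sec = 0.2; with
+# cex_abs_depth = 60, frames_to_next_cex = 30, i.e. about 150 sec of work, so
+# with the factor-of-5 margin above it gives up unless roughly 750 sec remain.
+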
+##def gla_abs(t):
+## """ Do gate-level abstraction for F frames """
+## r = 100 *(1 - abs_ratio)
+## run_command('orpos; &get;&gla -dv -A %s_vabs.aig -T %d -R %d; &w %s_gla.aig'%(f_name,t,r,f_name))
+
def monitor_and_prove():
"""
- monitor and prove. whenever a new vabs is found, try to verify it
+ monitor and prove. Runs in parallel with the abstraction method.
+ It looks for a new vabs and, if found, tries to verify it in parallel.
+ We write a file that holds the variables
+ cex_abs_depth, abs_depth, abs_depth_prev, time_abs_prev, time_abs,
+ which are used by abs_done, called from gla_abs_iter (the replacement for gla_abs).
"""
global ifbip
+ global cex_abs_depth, abs_depth, abs_depth_prev, time_abs_prev, time_abs
#remove any stale vabs.aig so that there is no initial abstraction to verify.
-## print 'Entering monitora_and_prove'
- print ifbip
- run_command('w %s_vabs.aig'%f_name)
- if ifbip == 0:
- run_command('w vabs.aig')
+ name = '%s_vabs.aig'%f_name
+ if os.access('%s'%name,os.R_OK): #make it so that there is no initial abstraction
+ os.remove('%s'%name)
initial_size = sizeof()
print 'initial size = ',
print initial_size
- funcs = [eval('(pyabc_split.defer(read_and_sleep)())')]
- t = 1000 # do not want to timeout verification engines.
- t = abs_time
- J = [9,19,23,24,34] #engines BMC3,PDRMsd,INTRPm,REACHY - engines for first time through when no abstraction
- funcs = funcs + create_funcs(J,t)
- mtds = ['read_and_sleep'] + sublist(methods,J)
- print 'methods = %s'%mtds
+ time_abs = time_abs_prev = time.time()
+ cex_abs_depth = 0
+ abs_depth = abs_depth_prev = 0
+ write_abs_values()
+## if read_and_sleep(5): # wait until first abstraction when res is False
+## #time has run out as controlled by abs_time
+## return [Undecided_no_reduction] + ['read_and_sleep']
+ t = abs_time +10
+ tt = time.time()
+## print 'first read and sleep done'
#a return of Undecided means that abstraction might be good and calling routine will check this
- while True:
+ while True: #here we iterate looking for a new abstraction and trying to prove it
time_done = abs_bad = 0
+ funcs = [eval('(pyabc_split.defer(read_and_sleep)())')]
+ J = sims+intrps+pdrs+bmcs
+ J = modify_methods(J,1)
+ funcs = funcs + create_funcs(J,t)
+ mtds = ['read_and_sleep'] + sublist(methods,J)
+ print 'methods = %s'%mtds
for i,res in pyabc_split.abc_split_all(funcs):
+## print 'Mon. & Pr.: ',
## print i,res
- if i == 0: # read and sleep terminated
+ if i == 0: # read_and_sleep terminated
if res == False: #found new abstraction
+ read_abs_values()
+ time_abs_prev = time_abs
+ time_abs = time.time()
+ print 'time between new abstractions = %0.2f'%(time_abs - time_abs_prev)
+ write_abs_values()
abs_bad = 0 #new abs starts out good.
if not initial_size == sizeof() and n_latches() > abs_ratio * initial_size[2]:
- return Undecided_no_reduction
+ return [Undecided_no_reduction]+['read_and_sleep']
else:
break
elif res == True: # read and sleep timed out
time_done = 1
-## print 'read_and_sleep timed out'
+ print 'read_and_sleep timed out'
if abs_bad:
- return Undecided_no_reduction
+ return [Undecided_no_reduction]+['read_and_sleep']
else: #abs is still good. Let other engines continue
- return Undecided #calling routine handles >Unsat all the same right now.
+ return [Undecided]+['read_and_sleep'] #calling routine handles >Unsat all the same right now.
else:
assert False, 'something wrong. read and sleep did not return right thing'
if i > 0: #got result from one of the verify engines
+ print 'monitor_and_prove: Method %s terminated'%mtds[i],
+## print i,res
+ if res == None:
+ print 'Method %s failed'%mtds[i]
+ continue
## print 'method %s found SAT in frame %d'%(mtds[i],cex_frame())
- if is_unsat():
- print 'Parallel method %s proved UNSAT on current abstraction'%mtds[i]
- return Unsat
- if is_sat(): #abstraction is not good yet.
- print 'Parallel method %s found SAT on current abstraction in frame %d'%(mtds[i],cex_frame())
+ if is_unsat() or res == Unsat or res == 'UNSAT':
+ print '\nParallel %s proved UNSAT on current abstr\n'%mtds[i]
+ return [Unsat] + [mtds[i]]
+ elif is_sat() or res < Unsat or res == 'SAT': #abstraction is not good yet.
+ print 'method = %s'%mtds[i]
+ if not mtds[i] == 'RareSim': #the other engines give a better estimate of true cex depth
+ read_abs_values()
+ cex_abs_depth = cex_frame()
+ write_abs_values()
+ print '\nParallel %s found SAT on current abstr in frame %d\n'%(mtds[i],cex_frame())
## print 'n_vabs = %d'%n_vabs
if initial_size == sizeof():# the first time we were working on an aig before abstraction
- return Sat
+ print initial_size == abstraction_size
+ return [Sat]+[mtds[i]]
## print 'current abstraction invalid'
abs_bad = 1
break #this kills off other verification engines working on bad abstraction
else: #one of the engines undecided for some reason - failed?
- print 'Parallel %s terminated without result on current abstraction'%mtds[i]
+ print '\nParallel %s terminated without result on current abstr\n'%mtds[i]
continue
if abs_bad and not time_done: #here we wait until we have a new vabs.
- print 'current abstraction bad, waiting for new one'
-## print 'waiting for new abstraction'
- abc('r %s_abs.aig'%f_name) #read in the abstraction to destroy is_sat.
- res = read_and_sleep(5) #this will check every 5 sec, until abs_time sec has passed without new abs
+ time_remain = t -(time.time() - tt)
+ abc('r %s_abs.aig'%f_name) #read in the abstraction to destroy is_sat().
+ if abs_done(time_remain):
+ return [Undecided]+['timeout']
+ res = read_and_sleep(5) #this will check every 5 sec, until abs_time sec has passed without new abs
if res == False: #found new vabs. Now continue if vabs small enough
## print 'n_vabs = %d'%n_vabs
if (not initial_size == sizeof()) and n_latches() > abs_ratio * initial_size[2]:
- return Undecided_no_reduction
+ return [Undecided_no_reduction]+['no reduction']
else:
continue
elif res ==True: #read_and_sleep timed out
## print 'read_and_sleep timed out'
- return Undecided_no_reduction
+ return [Undecided_no_reduction]+['no reduction']
else:
break #this should not happen
elif abs_bad and time_done:
- print 'current abstraction bad, time has run out'
- return Undecided_no_reduction
+## print 'current abstraction bad, time has run out'
+ return [Undecided_no_reduction]+['no reduction']
elif time_done: #abs is good here
- print 'current abstraction still good, time has run out'
- return Undecided #this will cause calling routine to try to verify the final abstraction
+## print 'current abstraction still good, time has run out'
+ return [Undecided]+['reduction'] #this will cause calling routine to try to verify the final abstraction
#right now handles the same as Undecided_no_reduction-if time runs out we quit abstraction
else: #abs good and time not done
- print 'current abstraction still good, time has not run out'
- #we want to continue but after first time, we use expanded set of engines.
- funcs = [eval('(pyabc_split.defer(read_and_sleep)())')]
- funcs = funcs + create_funcs(J,t) #use old J first time
- mtds = ['read_and_sleep'] + sublist(methods,J)
- if initial_size == sizeof():
- print 'methods = %s'%mtds
- J = [7,9,19,23,24,30] #first time reflects that 7 and 30 are already being done
- continue #will try with new vabs
+ continue
+## print 'current abstraction still good, time has not run out'
+ return [len(funcs)]+['error']
def read_and_sleep(t=5):
"""
keep looking for a new vabs every 5 seconds. This is usually run in parallel with
- &vta -d
+ &vta -d or &gla
+ Returns False when new abstraction is found, and True when time runs out.
"""
+ global cex_abs_depth, abs_depth, abs_depth_prev, time_abs_prev, time_abs
#t is not used at present
tt = time.time()
T = 1000 #if after the last abstraction, no answer, then terminate
- T = abs_time
+ T = abs_time + 10
set_size()
name = '%s_vabs.aig'%f_name
## if ifbip > 0:
## name = '%s_vabs.aig'%f_name
-#### print 'name = %s'%name
+## print 'name = %s'%name
+ sleep(5)
while True:
if time.time() - tt > T: #too much time between abstractions
- print 'read_and_sleep timed out in %d sec.'%T
+## print 'read_and_sleep timed out in %d sec.'%T
return True
- if os.access('%s'%name,os.R_OK):
- abc('r %s'%name)
- abc('w %s_vabs_old.aig'%f_name)
- os.remove('%s'%name)
- if not check_size():
+ if os.access(name,os.R_OK):
+ #possible race condition
+ run_command('&r -s %s; &w %s_vabs_old.aig'%(name,f_name))
+## print '%s exists'%name
+ if not os.access(name,os.R_OK): #if not readable now then what was read in might not be OK.
+ print '%s does not exist'%name
+ continue
+## print '%s is read'%name
+## run_command('&r %s;read_status %s_vabs.status'%(name,f_name)) #need to use & space to keep the abstraction information
+ os.remove(name)
+ run_command('read_status %s_vabs.status'%f_name)
+## print '%s and %s_vabs.status have been read'%(name,f_name)
+## print 'reading %s_vabs.status'%f_name
+ #name is the derived model (not the model with the abstraction info)
+ run_command('&r -s %s_vabs_old.aig'%f_name)
+ run_command('&w %s_gla.aig'%f_name)
+ run_command('&gla_derive;&put')
+ run_command('w %s_gabs.aig'%f_name)
+## print '%s is removed'%name
+ read_abs_values()
+ time_abs_prev = time_abs
+ time_abs = time.time()
+## print 'abs values has been read'
+ run_command('read_status %s_vabs.status'%f_name)
+ abs_depth_prev = abs_depth
+ abs_depth = n_bmc_frames()
+ write_abs_values()
+## print 'abs values has been written'
+ time_remain = T - (time.time() - tt)
+ if abs_done(time_remain):
+ return True
+## if not check_size():
+ if True:
print '\nNew abstraction: ',
ps()
+## print 'Time = %0.2f'%(time.time() - tt)
set_size()
abc('w %s_abs.aig'%f_name)
return False
diff --git a/scripts/bmc2.sh b/scripts/simple.sh
index 3e9d9566..9c90fded 100644
--- a/scripts/bmc2.sh
+++ b/scripts/simple.sh
@@ -10,6 +10,5 @@ abc_root()
abc_dir=$(abc_root "$0")
bin_dir="${abc_dir}"/bin
-aig_file="$1"
-exec ${bin_dir}/abc -c "/rf ${aig_file} ; /pushredirect ; /pushdtemp ; bmc2 ; /popdtemp ; /popredirect ; /print_aiger_result"
+exec ${bin_dir}/abc -c "/simple_aiger $*"
diff --git a/scripts/bmc3.sh b/scripts/simple_liveness.sh
index bba01e20..8bf55100 100644
--- a/scripts/bmc3.sh
+++ b/scripts/simple_liveness.sh
@@ -10,6 +10,5 @@ abc_root()
abc_dir=$(abc_root "$0")
bin_dir="${abc_dir}"/bin
-aig_file="$1"
-exec "${bin_dir}"/abc -c "/rf ${aig_file} ; /pushredirect ; /pushdtemp ; bmc3 ; /popdtemp ; /popredirect ; /print_aiger_result"
+exec ${bin_dir}/abc -c "/simple_liveness_aiger $*"
diff --git a/scripts/dprove.sh b/scripts/simple_sat.sh
index 4289aad3..3a51ada6 100644
--- a/scripts/dprove.sh
+++ b/scripts/simple_sat.sh
@@ -10,6 +10,5 @@ abc_root()
abc_dir=$(abc_root "$0")
bin_dir="${abc_dir}"/bin
-aig_file="$1"
-exec ${bin_dir}/abc -c "/rf ${aig_file} ; /pushredirect ; /pushdtemp ; dprove ; /popdtemp ; /popredirect ; /print_aiger_result"
+exec ${bin_dir}/abc -c "/simple_sat_aiger $*"