summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBaruch Sterin <baruchs@gmail.com>2012-06-22 14:30:20 -0700
committerBaruch Sterin <baruchs@gmail.com>2012-06-22 14:30:20 -0700
commit2f64033b3767ffdb16d8a530d813e39ee53b6e73 (patch)
tree5359ba9f1295539ad4d3ef431a45fd82055c1360
parent735a831e13684334d55b422993a80d94d356f180 (diff)
downloadabc-2f64033b3767ffdb16d8a530d813e39ee53b6e73.tar.gz
abc-2f64033b3767ffdb16d8a530d813e39ee53b6e73.tar.bz2
abc-2f64033b3767ffdb16d8a530d813e39ee53b6e73.zip
.
-rw-r--r--scripts/main.py68
-rw-r--r--scripts/par.py4086
2 files changed, 2612 insertions, 1542 deletions
diff --git a/scripts/main.py b/scripts/main.py
index 70dcf103..d21ced80 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -86,6 +86,9 @@ def constraints():
print("inductive constraints:")
x('constr ')
+def help(s):
+ run_command('%s -h'%s)
+
def strip():
"""Strip out each output of a multi-output example and write it
into a separate file"""
@@ -100,6 +103,12 @@ def strip():
abc('w p_%d.aig'%j)
print_circuit_stats()
+def fn_def(f,n):
+ filename = f.__code__.co_filename
+ firstline = f.__code__.co_firstlineno
+ lines = open(filename, "r").readlines()
+ print "".join(lines[firstline : n+firstline])
+
def lst(list,match):
result = []
for j in range(len(list)):
@@ -133,20 +142,54 @@ def blif2aig():
def la():
return list_aig('')
-def list_aig(s):
+def list_aig(s=''):
""" prnts out the sizes of aig files"""
#os.chdir('../ibm-web')
list_all = os.listdir('.')
dir = lst(list_all,'.aig')
dir.sort()
+ result = []
for j in range(len(dir)):
name = dir[j][:-4]
- if not s in name:
+ if not s_in_s(s,name):
continue
print '%s '%name,
abc('r %s.aig'%name)
ps()
- return dir
+ result = result + [name]
+ return result
+
+def list_blif(s=''):
+ """ prnts out the sizes of aig files"""
+ #os.chdir('../ibm-web')
+ list_all = os.listdir('.')
+ dir = lst(list_all,'.blif')
+ dir.sort()
+ result = []
+ for j in range(len(dir)):
+ name = dir[j][:-5]
+ if not s_in_s(s,name):
+ continue
+ print '%s: '%name,
+ abc('read_blif %s.blif'%name)
+ run_command('ps')
+ abc('st;zero;w %s.aig'%name)
+ result = result + [name]
+ return result
+
+def s_in_s(s1,s2):
+ ls1 = len(s1)
+ l = 1+len(s2)- ls1
+ if l< 0:
+ return False
+ else:
+ for j in range(l):
+ if s1 == s2[j:j+ls1]:
+ return True
+ else:
+ continue
+ return False
+
def convert_ibm():
""" converts blif files (with constraints?) into aig files"""
@@ -174,14 +217,16 @@ def cleanup():
list = os.listdir('.')
for j in range(len(list)):
name = list[j]
- if (('_smp' in name) or ('_save' in name) or ('_backup' in name) or ('_osave' in name)
- or ('_best' in name) or ('_gsrm' in name) or ('gore' in name) or
- ('_bip' in name) or ('sm0' in name) or ('gabs' in name)
- or ('temp' in name) or ('__' in name) or ('greg' in name) or ('tf2' in name)
- or ('gsrm' in name) or ('_rpm' in name ) or ('gsyn' in name) or ('beforerpm' in name)
- or ('afterrpm' in name) or ('initabs' in name) or ('.status' in name) or ('_init' in name)
- or ('_osave' in name) or ('tt_' in name) or ('_before' in name) or ('_after' in name)
- or ('_and' in name) or ('_final' in name) or ('_spec' in name) or ('temp.a' in name) or ('_sync' in name)
+ if ((s_in_s('_smp',name)) or (s_in_s('_save', name)) or (s_in_s('_backup', name)) or (s_in_s('_osave', name))
+ or (s_in_s('_best', name)) or (s_in_s('_gsrm', name)) or (s_in_s('gore', name)) or
+ (s_in_s('_bip', name)) or (s_in_s('sm0', name)) or (s_in_s('gabs', name))
+ or (s_in_s('temp', name)) or (s_in_s('__', name)) or (s_in_s('greg', name)) or (s_in_s('tf2', name))
+ or (s_in_s('gsrm', name)) or (s_in_s('_rpm', name )) or (s_in_s('gsyn', name)) or (s_in_s('beforerpm', name))
+ or (s_in_s('afterrpm', name)) or (s_in_s('initabs', name)) or (s_in_s('.status', name)) or (s_in_s('_init', name))
+ or (s_in_s('_osave', name)) or (s_in_s('tt_', name)) or (s_in_s('_before', name)) or (s_in_s('_after', name))
+ or (s_in_s('_and', name)) or (s_in_s('_final', name)) or (s_in_s('_spec', name)) or (s_in_s('temp.a', name))
+ or (s_in_s('_sync', name)) or (s_in_s('_old', name)) or (s_in_s('_cone_', name)) or (s_in_s('_abs', name))
+ or (s_in_s('_vabs', name))
):
os.remove(name)
@@ -215,6 +260,7 @@ def strip_names():
simplify()
run_command('w %s_smp.aig'%name)
+
def map_ibm():
os.chdir('../ibmmike2')
list_ibm = os.listdir('.')
diff --git a/scripts/par.py b/scripts/par.py
index 21f209df..c2f939ab 100644
--- a/scripts/par.py
+++ b/scripts/par.py
@@ -6,9 +6,12 @@ import os
import time
import math
import main
+import filecmp
+
global G_C,G_T,latches_before_abs,latches_before_pba,n_pos_before,x_factor,methods,last_winner
-global last_cex,JV,JP, cex_list,max_bmc, last_cx, pord_on
+global last_cex,JV,JP, cex_list,max_bmc, last_cx, pord_on, trim_allowed, temp_dec, abs_ratio, ifbip
+global if_no_bip
"""
The functions that are currently available from module _abc are:
@@ -22,6 +25,8 @@ int prob_status(); 1 = unsat, 0 = sat, -1 = unsolved
int cex_get()
int cex_put()
int run_command(char* cmd);
+int n_nodes();
+int n_levels();
bool has_comb_model();
bool has_seq_model();
@@ -44,7 +49,7 @@ stackno_gabs = stackno_gore = stackno_greg= 0
STATUS_UNKNOWN = -1
STATUS_SAT = 0
STATUS_UNSAT = 1
-RESULT = ('SAT' , 'SAT', 'UNSAT', 'UNDECIDED', 'UNDECIDED,', 'UNDECIDED' )
+RESULT = ('SAT' , 'SAT', 'UNSAT', 'UNDECIDED', 'UNDECIDED,', 'ERROR' )
Sat = Sat_reg = 0
Sat_true = 1
Unsat = 2
@@ -58,6 +63,8 @@ last_time = 0
j_last = 0
seed = 113
init_simp = 1
+temp_dec = True
+ifpord1 = 1
K_backup = init_time = 0
last_verify_time = 20
last_cex = last_winner = 'None'
@@ -68,20 +75,23 @@ sec_sw = False
sec_options = ''
cex_list = []
TERM = 'USL'
+ifbip = 0 # sets the abtraction method
+if_no_bip = 0 #True sets it up so it can use bip and reachx commands.
+abs_ratio = .5 #this controls when abstract stops
t_init = 2 #initial time for poor man's concurrency.
methods = ['PDR', 'INTRP', 'BMC',
'SIM', 'REACHX',
- 'PRE_SIMP', 'SUPER_PROVE2', 'PDRM', 'REACHM', 'BMC3','Min_Retime',
- 'For_Retime','REACHP','REACHN','PDRsd','prove_part_2',
- 'prove_part_3','verify','sleep','PDRMm','prove_part_1',
+ 'PRE_SIMP', 'Simple_prove', 'PDRM', 'REACHM', 'BMC3','Min_Retime',
+ 'For_Retime','REACHP','REACHN','PDR_sd','prove_part_2',
+ 'prove_part_3','verify','sleep','PDRM_sd','prove_part_1',
'run_parallel','INTRPb', 'INTRPm', 'REACHY', 'REACHYc','RareSim','simplify', 'speculate',
- 'quick_sec', 'BMC_J']
+ 'quick_sec', 'BMC_J', 'BMC2', 'extract -a', 'extract', 'PDRa']
#'0.PDR', '1.INTERPOLATION', '2.BMC', '3.SIMULATION',
-#'4.REACHX', '5.PRE_SIMP', '6.SUPER_PROVE', '7.PDRM', '8.REACHM', 9.BMC3'
+#'4.REACHX', '5.PRE_SIMP', '6.SUPER_PROVE(2)', '7.PDRM', '8.REACHM', 9.BMC3'
# 10. Min_ret, 11. For_ret, 12. REACHP, 13. REACHN 14. PDRseed 15.prove_part_2,
#16.prove_part_3, 17.verify, 18.sleep, 19.PDRMm, 20.prove_part_1,
#21.run_parallel, 22.INTRP_bwd, 23. Interp_m 24. REACHY 25. REACHYc 26. Rarity Sim 27. simplify
-#28. speculate, 29. quick_sec, 30 bmc3 -S
+#28. speculate, 29. quick_sec, 30 bmc3 -S, 31. BMC2 32. extract -a 33. extract 34. pdr_abstract
win_list = [(0,.1),(1,.1),(2,.1),(3,.1),(4,.1),(5,-1),(6,-1),(7,.1)]
FUNCS = ["(pyabc_split.defer(abc)('&get;,pdr -vt=%f'%t))",
"(pyabc_split.defer(abc)('&get;,imc -vt=%f'%(t)))",
@@ -89,7 +99,8 @@ FUNCS = ["(pyabc_split.defer(abc)('&get;,pdr -vt=%f'%t))",
"(pyabc_split.defer(simulate)(t))",
"(pyabc_split.defer(abc)('reachx -t %d'%t))",
"(pyabc_split.defer(pre_simp)())",
- "(pyabc_split.defer(super_prove)(2))",
+## "(pyabc_split.defer(super_prove)(2))",
+ "(pyabc_split.defer(simple)())",
"(pyabc_split.defer(pdrm)(t))",
"(pyabc_split.defer(abc)('&get;&reachm -vcs -T %d'%t))",
"(pyabc_split.defer(abc)('bmc3 -C 1000000 -T %f'%t))",
@@ -97,42 +108,50 @@ FUNCS = ["(pyabc_split.defer(abc)('&get;,pdr -vt=%f'%t))",
"(pyabc_split.defer(abc)('dr -m;&get;&lcorr;&dc2;&scorr;&put;dr'))",
"(pyabc_split.defer(abc)('&get;&reachp -vr -T %d'%t))",
"(pyabc_split.defer(abc)('&get;&reachn -vr -T %d'%t))",
- "(pyabc_split.defer(abc)('&get;,pdr -vt=%f -seed=521'%t))",
- "(pyabc_split.defer(prove_part_2)(K))",
- "(pyabc_split.defer(prove_part_3)(K))",
+## "(pyabc_split.defer(abc)('&get;,pdr -vt=%f -seed=521'%t))",
+ "(pyabc_split.defer(pdrseed)(t))",
+ "(pyabc_split.defer(prove_part_2)())",
+ "(pyabc_split.defer(prove_part_3)())",
"(pyabc_split.defer(verify)(JV,t))",
"(pyabc_split.defer(sleep)(t))",
"(pyabc_split.defer(pdrmm)(t))",
- "(pyabc_split.defer(prove_part_1)'(%d)'%(K))",
+ "(pyabc_split.defer(prove_part_1)())",
"(pyabc_split.defer(run_parallel)(JP,t,'TERM'))",
"(pyabc_split.defer(abc)('&get;,imc -bwd -vt=%f'%t))",
- "(pyabc_split.defer(abc)('int -C 1000000 -F 10000 -K 2 -T %f'%t))",
+## "(pyabc_split.defer(abc)('int -C 1000000 -F 10000 -K 2 -T %f'%t))",
+ "(pyabc_split.defer(abc)('int -C 1000000 -F 10000 -K 1 -T %f'%t))",
"(pyabc_split.defer(abc)('&get;&reachy -v -T %d'%t))",
"(pyabc_split.defer(abc)('&get;&reachy -cv -T %d'%t))",
"(pyabc_split.defer(simulate2)(t))",
"(pyabc_split.defer(simplify)())",
"(pyabc_split.defer(speculate)())",
"(pyabc_split.defer(quick_sec)(t))",
- "(pyabc_split.defer(bmc_s)(t))"
+ "(pyabc_split.defer(bmc_j)(t))",
+ "(pyabc_split.defer(abc)('bmc2 -C 1000000 -T %f'%t))",
+ "(pyabc_split.defer(extractax)('a'))",
+ "(pyabc_split.defer(extractax)())",
+ "(pyabc_split.defer(pdra)(t))",
]
## "(pyabc_split.defer(abc)('bmc3 -C 1000000 -T %f -S %d'%(t,int(1.5*max_bmc))))"
#note: interp given 1/2 the time.
allreachs = [4,8,12,13,24,25]
reachs = [24]
-allpdrs = [0,7,14,19]
-pdrs = [0,7]
-allbmcs = [2,9,30]
+##allpdrs = [14,7,34,19,0]
+allpdrs = [14,7,34,19]
+pdrs = [34,7,14]
+allbmcs = [2,9,30,31]
exbmcs = [2,9]
bmcs = [9,30]
allintrps = [1,22,23]
bestintrps = [23]
intrps = [23]
allsims = [3,26]
-sims = [3]
+sims = [26]
allslps = [18]
slps = [18]
+imc1 = [1]
-JVprove = [7,1,4,24]
+JVprove = [7,23,4,24]
JV = pdrs+intrps+bmcs+sims #sets what is run in parallel '17. verify' above
JP = JV + [27] # sets what is run in '21. run_parallel' above 27 simplify should be last because it can't time out.
#_____________________________________________________________
@@ -147,13 +166,14 @@ JP = JV + [27] # sets what is run in '21. run_parallel' above 27 simplify shoul
def initialize():
global xfi, max_bmc, last_time,j_last, seed, init_simp, K_backup, last_verify_time
global init_time, last_cex, last_winner, trim_allowed, t_init, sec_options, sec_sw
- global n_pos_before, n_pos_proved, last_cx, pord_on
+ global n_pos_before, n_pos_proved, last_cx, pord_on, temp_dec, abs_time
xfi = x_factor = 1 #set this to higher for larger problems or if you want to try harder during abstraction
max_bmc = -1
last_time = 0
j_last = 0
seed = 113
init_simp = 1
+ temp_dec = True
K_backup = init_time = 0
last_verify_time = 20
last_cex = last_winner = 'None'
@@ -166,7 +186,24 @@ def initialize():
cex_list = []
n_pos_before = n_pos()
n_pos_proved = 0
+ abs_time = 10000
+def set_abs_method():
+ """ controls the way we do abstraction, 0 = no bip, 1 = old way, 2 use new bip and -dwr
+ see absab()
+ """
+ global ifbip
+ print 'Set method of abstraction: \n0 = use vta and no bips, \n1 = old way, \nelse = use ,abs and -dwr'
+ s = raw_input()
+ s = remove_spaces(s)
+ if s == '1':
+ ifbip = 1 #old way
+ elif s == '0':
+ ifbip = 0 #use vta and no bip
+ else:
+ ifbip = 2 #use ,abc -dwr
+ print 'ifbip set to %d. Note engines are set only when read_file is done'%ifbip
+
def ps():
print_circuit_stats()
@@ -214,12 +251,13 @@ def convert(t):
def set_engines(N=0):
"""
+ Called only when read_file is called.
Sets the MC engines that are used in verification according to
- if there are 4 or 8 processors.
+ if there are 4 or 8 processors. if if_no_bip = 1, we will not use any bip and reachx engines
"""
- global reachs,pdrs,sims,intrps,bmcs
+ global reachs,pdrs,sims,intrps,bmcs,n_proc,abs_ratio,ifbip
if N == 0:
- N = os.sysconf(os.sysconf_names["SC_NPROCESSORS_ONLN"])
+ N = n_proc = os.sysconf(os.sysconf_names["SC_NPROCESSORS_ONLN"])
if N == 1:
reachs = [24]
pdrs = [7]
@@ -237,17 +275,22 @@ def set_engines(N=0):
slps = [18]
elif N == 4:
reachs = [24]
- pdrs = [7]
+ pdrs = [34,7]
+ if if_no_bip:
+ allpdrs = pdrs = [7,19]
bmcs = [9,30]
intrps = [23]
sims = []
slps = [18]
elif N == 8:
reachs = [24]
- pdrs = [0,7]
+ pdrs = [34,7,14]
+ intrps = [23,1]
+ if if_no_bip:
+ allpdrs = pdrs = [7,19]
+ intrps = [23]
bmcs = [9,30]
- intrps = [23]
- sims = [3]
+ sims = [26] #use default
slps = [18]
def set_globals():
@@ -284,6 +327,38 @@ def remove_spaces(s):
y = y + t
return y
+def seq_name(f):
+ names = []
+ f = f + '_'
+ names = []
+ while len(f)>0:
+ j = f.find('_')
+ if j == -1:
+ break
+ names = names + [f[:j]]
+## print names
+ f = f[j+1:]
+## print f
+ return names
+
+def revert(f,n):
+ l = seq_name(f)
+ for j in range(n):
+ if len(l)>0:
+ l.pop()
+ name = construct(l)
+ return name
+
+def construct(l):
+ ll = l
+ name = ''
+ while len(l)>0:
+ name = '_'+ll.pop()+name
+ return name[1:]
+
+def process_sat():
+ l = seq_name(f_name)
+
def read_file_quiet(fname=None):
"""This is the main program used for reading in a new circuit. The global file name is stored (f_name)
Sometimes we want to know the initial starting name. The file name can have the .aig extension left off
@@ -292,8 +367,8 @@ def read_file_quiet(fname=None):
correct f_name."""
global max_bmc, f_name, d_name, initial_f_name, x_factor, init_initial_f_name, win_list,seed, sec_options
global win_list, init_simp, po_map
- set_engines(4) #temporary
- ps()
+ abc('fraig_restore') #clear out any residual fraig_store
+ set_engines() #temporary
init_simp = 1
win_list = [(0,.1),(1,.1),(2,.1),(3,.1),(4,.1),(5,-1),(6,-1),(7,.1)] #initialize winning engine list
po_map = range(n_pos())
@@ -305,19 +380,34 @@ def read_file_quiet(fname=None):
print 'Type in the name of the aig file to be read in'
s = raw_input()
s = remove_spaces(s)
+## print s
else:
s = fname
if s[-4:] == '.aig':
f_name = s[:-4]
+ elif s[-5:] == '.blif':
+ f_name = s[:-5]
else:
f_name = s
s = s+'.aig'
## run_command(s)
- run_command('&r %s;&put'%s)
+## print s
+ if s[-4:] == '.aig':
+ run_command('&r %s;&put'%s) #warning: changes names to generic ones.
+ else: #this is a blif file
+ run_command('r %s'%s)
+ abc('st;&get;&put') #changes names to generic ones for doing cec later.
+ run_command('zero;w %s.aig'%f_name)
set_globals()
- initial_f_name = f_name
- init_initial_f_name = f_name
+ init_initial_f_name = initial_f_name = f_name
+ print 'Initial f_name = %s'%f_name
abc('addpi')
+ #check_pos() #this removes constant outputs with a warning -
+ #needed when using iso. Need another fix for using iso.
+ run_command('fold')
+ ps()
+ return
+
def read_file():
global win_list, init_simp, po_map
@@ -357,6 +447,10 @@ def bmc_depth():
max_bmc = max(b,max_bmc)
return max_bmc
+def null_status():
+ """ resets the status to the default values but note that the &space is changed"""
+ abc('&get;&put')
+
def set_max_bmc(b):
""" Keeps increasing max_bmc which is the maximum number of time frames for
which the current circuit is known to be UNSAT for"""
@@ -370,20 +464,24 @@ def print_circuit_stats():
o = n_pos()
l = n_latches()
a = n_ands()
+ s='ANDs'
+ if a == -1:
+ a = n_nodes()
+ s = 'Nodes'
b = max(max_bmc,bmc_depth())
c = cex_frame()
if b>= 0:
if c>=0:
- print 'PIs=%d,POs=%d,FF=%d,ANDs=%d,max depth=%d,CEX depth=%d'%(i,o,l,a,b,c)
+ print 'PIs=%d,POs=%d,FF=%d,%s=%d,max depth=%d,CEX depth=%d'%(i,o,l,s,a,b,c)
elif is_unsat():
- print 'PIs=%d,POs=%d,FF=%d,ANDs=%d,max depth = infinity'%(i,o,l,a)
+ print 'PIs=%d,POs=%d,FF=%d,%s=%d,max depth = infinity'%(i,o,l,s,a)
else:
- print 'PIs=%d,POs=%d,FF=%d,ANDs=%d,max depth=%d'%(i,o,l,a,b)
+ print 'PIs=%d,POs=%d,FF=%d,%s=%d,max depth=%d'%(i,o,l,s,a,b)
else:
if c>=0:
- print 'PIs=%d,POs=%d,FF=%d,ANDs=%d,CEX depth=%d'%(i,o,l,a,c)
+ print 'PIs=%d,POs=%d,FF=%d,%s=%d,CEX depth=%d'%(i,o,l,s,a,c)
else:
- print 'PIs=%d,POs=%d,FF=%d,ANDs=%d'%(i,o,l,a)
+ print 'PIs=%d,POs=%d,FF=%d,%s=%d'%(i,o,l,s,a)
def q():
exit()
@@ -431,28 +529,36 @@ def simplify():
on the problem size """
set_globals()
abc('&get;&scl;&lcorr;&put')
- n =n_ands()
p_40 = False
- if (40000 < n and n < 100000):
+ n =n_ands()
+ if n >= 70000:
+ abc('&get;&scorr -C 0;&put')
+ n =n_ands()
+ if n >= 100000:
+ abc('&get;&scorr -k;&put')
+ if (70000 < n and n < 100000):
p_40 = True
abc("&get;&dc2;&put;dr;&get;&lcorr;&dc2;&put;dr;&get;&scorr;&fraig;&dc2;&put;dr")
n = n_ands()
- if n<60000:
+## if n<60000:
+ if n < 80000:
abc("&get;&scorr -F 2;&put;dc2rs")
else: # n between 60K and 100K
abc("dc2rs")
n = n_ands()
- if (30000 < n and n <= 40000):
+## if (30000 < n and n <= 40000):
+ if (60000 < n and n <= 70000):
if not p_40:
abc("&get;&dc2;&put;dr;&get;&lcorr;&dc2;&put;dr;&get;&scorr;&fraig;&dc2;&put;dr")
abc("&get;&scorr -F 2;&put;dc2rs")
else:
abc("dc2rs")
n = n_ands()
- if n <= 30000:
+## if n <= 60000:
+ if n <= 70000:
abc('scl -m;drw;dr;lcorr;drw;dr')
nn = max(1,n)
- m = int(min( 60000/nn, 16))
+ m = int(min( 70000/nn, 16))
if m >= 1:
j = 1
while j <= m:
@@ -465,7 +571,12 @@ def simplify():
if check_size():
break
j = 2*j
+ print 'ANDs=%d,'%n_ands(),
+ if n_ands() >= .98 * nands:
+ break
continue
+ if not check_size():
+ print '\n'
return get_status()
def simulate2(t):
@@ -477,19 +588,20 @@ def simulate2(t):
btime = time.clock()
diff = 0
while True:
- f = 5
- w = 255
+ f = 100
+ w = 16
+ b = 16
+ r = 500
for k in range(9): #this controls how deep we go
- f = min(f *2, 3500)
+ f = min(f*2, 3500)
w = max(((w+1)/2)-1,1)
- abc('sim3 -m -F %d -W %d -R %d'%(f,w,seed))
+ abc('sim3 -F %d -W %d -N %d -R %d -B %d'%(f,w,seed,r,b))
seed = seed+23
if is_sat():
return 'SAT'
if ((time.clock()-btime) > t):
return 'UNDECIDED'
-
def simulate(t):
abc('&get')
result = eq_simulate(t)
@@ -545,7 +657,38 @@ def refine_with_cex():
#print ' %d FF'%n_latches()
return
-def abstraction_refinement(latches_before,NBF):
+def iter_tempor():
+ na = n_ands()
+ while True:
+ abc('w save.aig')
+ npi = n_pis()
+ print npi
+ abc('tempor -T 5 -F 8')
+ print 'tempor = ',
+ ps()
+ x = trim()
+## if n_pis() > 2*npi:
+## abc('r save.aig')
+## return 'UNDECIDED'
+ abc('dr')
+ print 'retime = ',
+ ps()
+ simplify()
+ trim()
+ print 'simplify -> trim = ',
+ ps()
+ if n_ands() > na:
+ abc('r save.aig')
+ ps()
+ print 'No improvement'
+ return 'UNDECIDED'
+ na = n_ands()
+ ps()
+ if n_latches() == 0:
+ return RESULT[check_sat()]
+
+
+def abstraction_refinement(latches_before,NBF,ratio=.75):
"""Subroutine of 'abstract' which does the refinement of the abstracted model,
using counterexamples found by BMC or BDD reachability"""
global x_factor, f_name, last_verify_time, x, win_list, last_winner, last_cex, t_init, j_last, sweep_time
@@ -559,6 +702,8 @@ def abstraction_refinement(latches_before,NBF):
J = slps+intrps+pdrs+bmcs+sims
print sublist(methods,J)
last_verify_time = t = x_factor*max(50,max(1,2.5*G_T))
+ t = 1000 #temporary
+ t = abs_time
initial_verify_time = last_verify_time = t
reg_verify = True
print 'Verify time set to %d'%last_verify_time
@@ -566,19 +711,17 @@ def abstraction_refinement(latches_before,NBF):
generate_abs(1) #generate new gabs file from refined greg file
set_globals()
latches_after = n_latches()
- if rel_cost_t([pis_before_abs,latches_before_abs, ands_before_abs])> -.1:
- break
- if latches_after >= .90*latches_before:
+ if small_abs(ratio):
break
t = last_verify_time
yy = time.time()
abc('w %s_beforerpm.aig'%f_name)
rep_change = reparam() #new - must do reconcile after to make cex compatible
abc('w %s_afterrpm.aig'%f_name)
- if reg_verify:
- status = verify(J,t)
- else:
- status = pord_1_2(t)
+## if reg_verify:
+ status = verify(J,t)
+## else:
+## status = pord_1_2(t)
###############
if status == Sat_true:
print 'Found true cex'
@@ -587,6 +730,7 @@ def abstraction_refinement(latches_before,NBF):
if status == Unsat:
return status
if status == Sat:
+ abc('write_status %s_after.status'%f_name)
reconcile(rep_change) # makes the cex compatible with original before reparam and puts original in work space
abc('write_status %s_before.status'%f_name)
refine_with_cex()
@@ -600,11 +744,26 @@ def abstraction_refinement(latches_before,NBF):
print '**** Latches reduced from %d to %d'%(latches_before, n_latches())
return Undecided_reduction
-def abstract():
+def small_abs(ratio=.75):
+ """ tests is the abstraction is too large"""
+ return ((rel_cost_t([pis_before_abs,latches_before_abs, ands_before_abs])> -.1)
+ or (n_latches() >= ratio*latches_before_abs))
+
+##def abstract(if_bip=True):
+## global ratio
+## if if_bip:
+## return abstractb(True) #old method using abstraction refinement
+## else:
+## return abstractb(False) #not using bip and reachx
+
+def abstractb():
""" abstracts using N Een's method 3 - cex/proof based abstraction. The result is further refined using
- simulation, BMC or BDD reachability"""
+ simulation, BMC or BDD reachability. abs_ratio is the the limit for accepting an abstraction"""
global G_C, G_T, latches_before_abs, x_factor, last_verify_time, x, win_list, j_last, sims
- global latches_before_abs, ands_before_abs, pis_before_abs
+ global latches_before_abs, ands_before_abs, pis_before_abs, abs_ratio
+ if ifbip < 1:
+ print 'using ,abs in old way'
+ tt = time.time()
j_last = 0
set_globals()
#win_list = []
@@ -618,18 +777,19 @@ def abstract():
# fork off BMC3 and PDRm along with initial abstraction
t = 10000 #want to run as long as initial abstract takes.
## J = sims+pdrs+bmcs+intrps
- J = pdrs+bmcs+bestintrps
- if n_latches() < 80:
- J = J + [4]
+ J = slps+pdrs+bmcs+intrps
+ J = modify_methods(J,1)
+## if n_latches() < 80:
+## J = J + [4]
funcs = create_funcs(J,t) + funcs
mtds = sublist(methods,J) + ['initial_abstract'] #important that initial_abstract goes last
- m,NBF = fork_last(funcs,mtds)
+ m,result = fork_last(funcs,mtds)
if is_sat():
print 'Found true counterexample in frame %d'%cex_frame()
return Sat_true
if is_unsat():
return Unsat
- set_max_bmc(NBF)
+## set_max_bmc(NBF)
NBF = bmc_depth()
print 'Abstraction good to %d frames'%max_bmc
#note when things are done in parallel, the &aig is not restored!!!
@@ -639,23 +799,33 @@ def abstract():
ps()
abc('w %s_init_abs.aig'%f_name)
latches_after = n_latches()
-## if latches_after >= .90*latches_before_abs:
- if ((rel_cost_t([pis_before_abs, latches_before_abs, ands_before_abs])> -.1) or (latches_after >= .90*latches_before_abs)):
+## if latches_after >= .90*latches_before_abs: #the following should match similar statement
+## if ((rel_cost_t([pis_before_abs, latches_before_abs, ands_before_abs])> -.1) or
+## (latches_after >= .75*latches_before_abs)):
+ if small_abs(abs_ratio):
abc('r %s_before_abs.aig'%f_name)
print "Little reduction!"
+ print 'Abstract time wasted = %0.2f'%(time.time()-tt)
return Undecided_no_reduction
sims_old = sims
sims=sims[:1] #make it so that rarity sim is not used since it can't find a cex
- result = abstraction_refinement(latches_before_abs, NBF)
+ result = abstraction_refinement(latches_before_abs, NBF,abs_ratio)
sims = sims_old
if result <= Unsat:
return result
## if n_latches() >= .90*latches_before_abs:
- if ((rel_cost_t([pis_before_abs, latches_before_abs, ands_before_abs])> -.1) or (latches_after >= .90*latches_before_abs)):
+## if ((rel_cost_t([pis_before_abs, latches_before_abs, ands_before_abs])> -.1) or (latches_after >= .90*latches_before_abs)):
## if rel_cost_t([pis_before_abs,latches_before_abs, ands_before_abs])> -.1:
+ if small_abs(abs_ratio): #r is ratio of final to initial latches in absstraction. If greater then True
abc('r %s_before_abs.aig'%f_name) #restore original file before abstract.
- print "Little reduction!"
- result = Undecided_no_reduction
+ print "Little reduction! ",
+ print 'Abstract time wasted = %0.2f'%(time.time()-tt)
+ result = Undecided_no_reduction
+ return result
+ #new
+ else:
+ write_file('abs') #this is only written if it was not solved and some change happened.
+ print 'Abstract time = %0.2f'%(time.time()-tt)
return result
def initial_abstract_old():
@@ -676,18 +846,23 @@ def initial_abstract_old():
abc('&w %s_greg.aig'%f_name)
## ps()
-def initial_abstract():
- global G_C, G_T, latches_before_abs, x_factor, last_verify_time, x, win_list, max_bmc
+def initial_abstract(t=100):
+ global G_C, G_T, latches_before_abs, x_factor, last_verify_time, x, win_list, max_bmc, ifbip
set_globals()
time = max(1,.1*G_T)
+ time = min(time,t)
abc('&get;,bmc -vt=%f'%time)
set_max_bmc(bmc_depth())
c = 2*G_C
f = max(2*max_bmc,20)
b = min(max(10,max_bmc),200)
- t = x_factor*max(1,2*G_T)
- s = min(max(3,c/30000),10) # stability between 3 and 10
+ t1 = x_factor*max(1,2*G_T)
+ t = max(t1,t)
+ s = min(max(3,c/30000),10) # stability between 3 and 10
cmd = '&get;,abs -bob=%d -stable=%d -timeout=%d -vt=%d -depth=%d'%(b,s,t,t,f)
+ if ifbip == 2:
+ cmd = '&get;,abs -bob=%d -stable=%d -timeout=%d -vt=%d -depth=%d -dwr=%s_vabs'%(b,s,t,t,f,f_name)
+ print 'Using -dwr=%s_vabs'%f_name
## print cmd
print 'Running initial_abstract with bob=%d,stable=%d,time=%d,depth=%d'%(b,s,t,f)
abc(cmd)
@@ -774,7 +949,7 @@ def n_abs_latches():
def pba_loop(F):
n = n_abs_latches()
while True:
- run_command('&abs_pba -v -C 100000 -F %d'%F)
+ run_command('&abs_pba -v -C 0 -F %d'%F)
abc('&w pba_temp.aig')
abc('&abs_derive;&put')
abc('&r pba_temp.aig')
@@ -785,39 +960,50 @@ def pba_loop(F):
if N > n:
print 'cex found'
break
-## else:
-## break
-
-##def sec_m(options):
-## """
-## This assumes that a miter has been loaded into the workspace. The miter has been
-## constructed using &miter. Then we demiter it using command 'demiter'
-## This produces parts 0 and 1, renamed A_name, and B_name.
-## We then do speculate immediately. Options are passed to &srm2 using the
-## options given by sec_options
-## """
-## global f_name,sec_sw, A_name, B_name, sec_options
-## A_name = f_name+'_part0'
-## B_name = f_name+'_part1'
-## run_command('demiter')
-## #simplify parts A and B here?
-## abc('r %s_part1.aig;scl;dc2;&get;&lcorr;&scorr;&put;dc2;dc2;w %s_part1.aig'%(f_name,f_name))
-## ps()
-## abc('r %s_part0.aig;scl;dc2;&get;&lcorr;&scorr;&put;dc2;dc2;w %s_part0.aig'%(f_name,f_name))
-## ps()
-## #simplify done
-## f_name = A_name
-## return sec(B_name,options)
-def ss(options):
- """Alias for super_sec"""
+def ssm(options=''):
+ """ Now this should be the same as super_prove(1) """
+ y = time.time()
+ result = prove_part_1() # simplify first
+ if result == 'UNDECIDED':
+ result = ss(options)
+ print 'Total time taken on file %s by function ssm(%s) = %d sec.'%(initial_f_name,options,(time.time() - y))
+ return result
+
+def ssmg():
+ return ssm('g')
+def ssmf():
+ return ssm('f')
+
+
+def ss(options=''):
+ """
+ Alias for super_sec
+ This is the preferred command if the problem (miter) is suspected to be a SEC problem
+ """
global max_bmc, init_initial_f_name, initial_f_name,win_list, last_verify_time, sec_options
sec_options = options
- print 'Executing speculate'
+ print '\n*************Executing speculate************'
+ y = time.time()
+ abc('scl')
result = speculate()
+ # if result is 1 then it is a real SAT since we did not do anything before
+ if result > 2: #save the result and read in with /rf so that files are initialized correctly
+ if not '_spec' in f_name:
+ write_file('spec') #make sure we do not overwrite original file
+ read_file_quiet('%s'%f_name) #this resets f_name and initial_f_name etc.
+ print '\n*************Executing super_prove ************'
+ print 'New f_name = %s'%f_name
+ result = sp()
+ if result == 'SAT':
+ result = 'UNDECIDED' #because speculation was done initially.
+ elif result == 1:
+ result = 'SAT'
+ else:
+ result = RESULT[result]
+ print 'Total time taken on file %s by function ss(%s) = %d sec.'%(initial_f_name,options,(time.time() - y))
return result
-
def quick_sec(t):
## fb_name = f_name[:-3]+'New'
## abc('&get;&miter -s %s.aig;&put'%fb_name)
@@ -831,57 +1017,38 @@ def quick_sec(t):
else:
return'UNDECIDED'
-def pp_sec():
+def pre_sec():
+ """ put files to be compared into Old and New aigs. Simplify, but
+ turn off reparameterization so that PIs in Old and New match after simplification.
+ """
+ global trim_allowed
+## trim_allowed = False
+## print 'trim allowed = ',trim_allowed
print 'First file: ',
- read_file_quiet()
+ read_file_quiet() #note - reads into & space and then does &put
+ ps()
+ prs(False)
ps()
- abc('&w original_secOld.aig')
+ abc('&w Old.aig')
print 'Second file: ',
read_file_quiet()
ps()
- abc('&w original_secNew.aig')
-
-
-def sup_sec():
- global TERM
- """
- form miter of original_sec(Old,New), and then run in parallel quick_sec(28), speculate(29), and
- run_parallel(21) with JP set to
- """
- global f_name,sec_sw, A_name, B_name, sec_options
- #preprocess files to get rid of dangling FF
- abc('&r original_secOld.aig;&scl -ce;&w Old.aig')
- abc('&r original_secNew.aig;&scl -ce;&w New.aig')
- #done preprocessing
- read_file_quiet('Old')
- abc('&get;&miter -s New.aig;&put')
- set_globals()
- yy = time.time()
- A_name = f_name # Just makes it so that we can refer to A_name later in &srm2
- B_name = 'New'
- f_name = 'miter'
- abc('w %s.aig'%f_name)
- sec_options = 'l'
- sec_sw = True
- JP = [18,27] # sleep and simplify. JP sets the things run in parallel in method 21.
- #TERM sets the stopping condition
- TERM = 'USL' #Sat, Unsat or Last
- print sublist(methods,[27,21,28,29]+JV)
- t = 100 #this is the amount of time to run
- #18 is controlled by t, 28(speculate) is not, 29(quick_sec) does quick_simp and then controlled by t
- run_parallel([21,28,29],t,'US') #21 is run_parallel with JP and TERM.
- #run simplify for t sec, speculate,
- #and quick_sec (quick_simp and then verify(JV) for t)
- if is_unsat():
- return 'UNSAT'
- if is_sat():
- return 'SAT'
- else:
- return 'UNDECIDED' # should do sp or something
+ prs(False)
+ ps()
+ abc('&w New.aig')
+def cec():
+ print 'Type in the name of the aig file to be compared against'
+ s = raw_input()
+ s = remove_spaces(s)
+ if not 'aig' in s:
+ s = s+'.aig'
+ run_command("&get;&cec -v %s"%s)
def sec(B_part,options):
"""
+ Use this for AB filtering and not sup_sec
+ Use pp_sec to make easy names for A and B, namely Old and New.
This assumes that the original aig (renamed A_name below) is already read into the working space.
Then we form a miter using &miter between two circuits, A_name, and B_name.
We then do speculate immediately. Optionally we could simplify A and B
@@ -896,34 +1063,36 @@ def sec(B_part,options):
B_name = B_part
run_command('&get; &miter -s %s.aig; &put'%B_name)
## abc('orpos')
- f_name = A_name+'.'+B_name+'_miter' # reflect that we are working on a miter.
+ f_name = A_name+'_'+B_name+'_miter' # reflect that we are working on a miter.
abc('w %s.aig'%f_name)
print 'Miter = ',
ps()
-## result = pre_simp()
-## write_file('smp')
-## if result <= Unsat:
-## return RESULT[result]
sec_options = options
if sec_options == 'ab':
sec_options = 'l' #it will be changed to 'ab' after &equiv
sec_sw = True
- result = speculate() #should do super_sec here.
+ result = speculate()
sec_options = ''
sec_sw = False
if result <= Unsat:
result = RESULT[result]
else:
result = sp()
+ if result == 'SAT':
+ result = 'UNDECIDED'
print 'Total time = %d'%(time.time() - yy)
return result
def filter(opts):
global A_name,B_name
+## print 'Filtering with options = %s'%opts
""" This is for filter which effectively only recognizes options -f -g"""
if (opts == '' or opts == 'l'): #if 'l' this is used only for initial &equiv2 to get initial equiv creation
return
if opts == 'ab':
+ print A_name ,
+ print B_name
+ run_command('&ps')
run_command('&filter -f %s.aig %s.aig'%(A_name,B_name))
return
if not opts == 'f':
@@ -946,56 +1115,78 @@ def check_if_spec_first():
abc('&r check_and.aig')
return result
-def initial_speculate():
+def initial_speculate(sec_opt=''):
global sec_sw, A_name, B_name, sec_options, po_map
set_globals()
+ if sec_options == '':
+ sec_options = sec_opt
t = max(1,.5*G_T)
r = max(1,int(t))
-## print 'Running &equiv3'
-## abc('&w before3.aig')
+ print 'Initial sec_options = %s'%sec_options
if sec_options == 'l':
cmd = "&get; &equiv3 -lv -F 20 -T %f -R %d"%(3*t,15*r)
else:
cmd = "&get; &equiv3 -v -F 20 -T %f -R %d"%(3*t,15*r)
-## print cmd
+ print cmd
abc(cmd)
## print 'AND space after &equiv3: ',
-## run_command('&ps')
+ run_command('&ps')
if (sec_options == 'l'):
if sec_sw:
sec_options = 'ab'
else:
sec_options = 'f'
- print sec_options
+## print 'A_name: ',
+## run_command('r %s.aig;ps'%A_name)
+## print 'B_name: ',
+## run_command('r %s.aig;ps'%B_name)
filter(sec_options)
abc('&w initial_gore.aig')
## print 'Running &srm'
if sec_sw:
+ print 'miter: ',
+ run_command('&ps')
+ print 'A_name: ',
+ run_command('r %s.aig;ps'%A_name)
+ print 'B_name: ',
+ run_command('r %s.aig;ps'%B_name)
cmd = "&srm2 -%s %s.aig %s.aig; r gsrm.aig; w %s_gsrm.aig; &w %s_gore.aig"%(sec_options,A_name,B_name,f_name,f_name)
abc(cmd)
po_map = range(n_pos())
return
else:
+## abc('&r %s_gore.aig; &srm ; r gsrm.aig; w %s_gsrm.aig'%(f_name,f_name))
cmd = "&srm; r gsrm.aig; w %s_gsrm.aig; &w %s_gore.aig"%(f_name,f_name)
+## print 'Running %s'%cmd
abc(cmd)
- if (n_pos() > 100):
- sec_options = 'g'
- print 'sec_option changed to "g"'
- filter(sec_options)
- abc(cmd)
+ print 'done with &srm'
po_map = range(n_pos())
+ if sec_options == '':
+ if n_pos() > 200:
+ sec_options = 'g'
+ print 'sec_options set to %s'%'g'
+ abc('&r %s_gore.aig'%f_name)
+ filter(sec_options)
+ print 'Running &srm'
+ cmd = "&srm; r gsrm.aig; ps;w %s_gsrm.aig;&ps; &w %s_gore.aig"%(f_name,f_name)
+ print 'Running %s'%cmd
+ abc(cmd)
+ po_map = range(n_pos())
+
def test_against_original():
'''tests whether we have a cex hitting an original PO'''
- abc('&w %s_save.aig'%f_name) #we oreserve whatever was in the & space
- abc('&r %s_gore.aig'%f_name)
+ abc('&w %s_save.aig'%f_name) #we preserve whatever was in the & space
+ abc('&r %s_gore.aig'%f_name) #This is the original
abc('testcex')
PO = cex_po()
+## print 'test_against original gives PO = %d'%PO
abc('&r %s_save.aig'%f_name)
if PO > -1:
## print 'cex fails an original PO'
return True
else:
+ abc('write_status %s_status.status'%f_name)
return False
def set_cex_po(n=0):
@@ -1007,29 +1198,34 @@ def set_cex_po(n=0):
"""
global n_pos_before, n_pos_proved #these refer to real POs
if n == 0:
- abc('testcex -a -O %d'%(n_pos_before-n_pos_proved))
+ abc('testcex -a -O %d'%(n_pos_before-n_pos_proved)) #test regular AIG space
else:
- abc('testcex -O %d'%(n_pos_before-n_pos_proved))
+ abc('testcex -O %d'%(n_pos_before-n_pos_proved)) #test the &-AIG
PO = cex_po()
+## print 'cex_po = %d, n_pos_before = %d, n_pos_proved = %d'%(PO, n_pos_before, n_pos_proved)
if PO >= (n_pos_before - n_pos_proved): #cex_po is not an original
-## print 'cex PO = %d'%PO
+## print '1. cex PO = %d'%PO
return PO # after original so take it.
if n == 0:
- abc('testcex -a')
+ abc('testcex -a') #test regular
else:
- abc('testcex')
+ abc('testcex') #test &space
PO = cex_po()
+## print '2. cex PO = %d'%PO
cx = cex_get()
if PO > -1:
if test_against_original(): #this double checks that it is really an original PO
cex_put(cx)
+## print 'test_against_original was valid'
return PO
else:
+ print '1. PO not valid'
return -1 #error
-## if PO < 0 or PO >= (n_pos_before - n_pos_proved): # not a valid cex because already tested outside original.
-## PO = -1 #error
+ if PO < 0 or PO >= (n_pos_before - n_pos_proved): # not a valid cex because already tested outside original.
+ print '2. PO is not valid'
+ PO = -1 #error
+## print '3. cex PO = %d'%PO
return PO
-## print 'cex PO = %d'%PO
def speculate():
"""Main speculative reduction routine. Finds candidate sequential equivalences and refines them by simulation, BMC, or reachability
@@ -1039,41 +1235,23 @@ def speculate():
global last_cx
last_cx = 0
ifpord1 = 1
+ initial_po_size = last_srm_po_size = n_pos()
+ initial_sizes = [n_pis(),n_pos(),n_latches(),n_ands()]
if sec_sw:
print 'A_name = %s, B_name = %s, f_name = %s, sec_options = %s'%(A_name, B_name, f_name, sec_options)
- elif n_ands()> 6000 and sec_options == '':
+ elif n_ands()> 36000 and sec_options == '':
sec_options = 'g'
print 'sec_options set to "g"'
def refine_with_cex():
"""Refines the gore file to reflect equivalences that go away because of cex"""
global f_name
-## print 'Refining',
-## abc('&r %s_gore.aig;&w %s_gore_before.aig'%(f_name,f_name))
abc('write_status %s_before.status'%f_name)
abc('&r %s_gore.aig; &resim -m'%f_name)
filter(sec_options)
run_command('&w %s_gore.aig'%f_name)
return
-## def refine_with_cexs():
-## """Refines the gore file to reflect equivalences that go away because of cexs in cex_list"""
-## global f_name, cex_list
-## print 'Multiple refining'
-## abc('&r %s_gore.aig'%f_name)
-#### run_command('&ps')
-## for j in range(len(cex_list)):
-## cx = cex_list[j]
-## if cx == None:
-## continue
-## cex_put(cx)
-## run_command('&resim -m') #put the jth cex into the cex space and use it to refine the equivs
-## filter(sec_options)
-## abc('&w %s_gore.aig'%f_name)
-#### run_command('&ps')
-## cex_list = [] #done with this list.
-## return
-
def set_cex(lst):
""" assumes only one in lst """
for j in range(len(lst)):
@@ -1084,40 +1262,19 @@ def speculate():
cex_put(cx)
break
-## def test_all_cexs(lst):
-## """tests all cex"s to see if any violate original POs
-## if it does, return the original PO number
-## if not return -1
-## """
-## global n_pos_before, cex_list
-## run_command('&r %s_gore.aig'%f_name)
-## for j in range(len(cex_list)):
-## cx = lst[j]
-## if cx == None:
-## continue
-## cex_put(cx)
-## PO = set_cex_po() #if cex falsifies non-real PO it will set this first
-## if PO == -1: # there is a problem with cex since it does not falsify any PO
-## continue #we continue because there may be another valid cex in list
-## return PO #we will only process one cex for now.
-## return -1 #a real PO is not falsified by any of the cexs
-
def generate_srm():
"""generates a speculated reduced model (srm) from the gore file"""
global f_name, po_map, sec_sw, A_name, B_name, sec_options, n_pos_proved
## print 'Generating'
pos = n_pos()
ab = n_ands()
+ abc('w %s_oldsrm.aig'%f_name) #save for later purposes
if sec_sw:
run_command('&r %s_gore.aig; &srm2 -%s %s.aig %s.aig; r gsrm.aig; w %s_gsrm.aig'%(f_name,sec_options,A_name,B_name,f_name))
else:
abc('&r %s_gore.aig; &srm ; r gsrm.aig; w %s_gsrm.aig'%(f_name,f_name)) #do we still need to write the gsrm file
## ps()
po_map = range(n_pos())
-## cmd = '&get;&lcorr;&scorr;&trim -i;&put;w %s_gsrm.aig'%f_name
-## cmd = 'lcorr;&get;&trim -i;&put;w %s_gsrm.aig'%f_name
-## print 'Executing %s'%cmd
-## abc(cmd)
ps()
n_pos_proved = 0
return 'OK'
@@ -1126,10 +1283,12 @@ def speculate():
n_pos_proved = 0
n_latches_before = n_latches()
set_globals()
- t = max(1,.5*G_T)#irrelevant
- r = max(1,int(t))
+## t = max(1,.5*G_T)#irrelevant
+## r = max(1,int(t))
+ t = 1000
j_last = 0
- J = sims+pdrs+bmcs+intrps
+ J = slps+sims+pdrs+bmcs+intrps
+ J = modify_methods(J,1)
funcs = [eval('(pyabc_split.defer(initial_speculate)())')]
funcs = create_funcs(J,10000)+funcs #want other functins to run until initial speculate stops
mtds = sublist(methods,J) + ['initial_speculate'] #important that initial_speculate goes last
@@ -1165,9 +1324,10 @@ def speculate():
simp_sw = init = True
print '\nIterating speculation refinement'
sims_old = sims
- sims = sims[:1] #make it so rarity simulation is not used since it can't find a cex.
+ sims = sims[:1]
J = slps+sims+pdrs+intrps+bmcs
- print sublist(methods,J)
+ J = modify_methods(J)
+## print sublist(methods,J)
t = max(50,max(1,2*G_T))
last_verify_time = t
print 'Verify time set to %d'%last_verify_time
@@ -1183,6 +1343,7 @@ def speculate():
abc('w %s_gsrm_before.aig'%f_name)
set_size()
result = generate_srm()
+ last_srm_po_size = n_pos()
yy = time.time()
# if the size of the gsrm did not change after generating a new gsrm
# and if the cex is valid for the gsrm, then the only way this can happen is if
@@ -1209,8 +1370,8 @@ def speculate():
if reg_verify:
t_init = (time.time() - yy)/2 #start poor man's concurrency at last cex fime found
t_init = min(10,t_init)
- reg_verify = False #will cause pord_all to be used next
- print 'pord_all turned on'
+## temporary reg_verify = False #will cause pord_all to be used next
+## print 'pord_all turned on'
t = last_verify_time
print 'Verify time set to %d'%t
abc('w %s_beforerpm.aig'%f_name)
@@ -1231,7 +1392,12 @@ def speculate():
reconcile(rep_change) #end of pairing with reparam()
assert (npi == n_cex_pis()),'ERROR: #pi = %d, #cex_pi = %d'%(npi,n_cex_pis())
abc('&r %s_gore.aig;&w %s_gore_before.aig'%(f_name,f_name)) #we are making sure that none of the original POs fail
- PO = set_cex_po() #testing the &space
+ if reg_verify:
+ PO = set_cex_po(0) #testing the regular space
+ else:
+ abc('&r %s_gsrm.aig'%f_name)
+ PO = set_cex_po(1) # test against the &space.
+ print 'cex_PO is %d'%PO
if (-1 < PO and PO < (n_pos_before-n_pos_proved)):
print 'Found cex in original output = %d'%cex_po()
print 'Refinement time = %s'%convert(time.time() - ref_time)
@@ -1240,18 +1406,6 @@ def speculate():
return Error
refine_with_cex() #change the number of equivalences
continue
-## else: # we used pord_all()
-## cex_list = reconcile_all(cex_list, rep_change) #end of pairing with reparam()
-## PO = test_all_cexs(cex_list) #we have to make sure that none of the cex's fail the original POs.
-## if 0 <= PO and PO < (n_pos_before - n_pos_proved):
-## print 'PO = %d, n_pos_before = %d, n_pos_proved = %d'%(PO,n_pos_before, n_pos_proved)
-## print 'Found one of cexs in original output = %d'%cex_po()
-## print 'Refinement time = %0.2f'%(time.time() - ref_time)
-## return Sat_true
-## if PO == -1:
-## return Error
-## refine_with_cexs()
-## continue
elif (is_unsat() or n_pos() == 0):
print 'UNSAT'
print 'Refinement time = %s'%convert(time.time() - ref_time)
@@ -1278,7 +1432,8 @@ def speculate():
assert result == get_status(),'result: %d, status: %d'%(result,get_status())
set_cex(cex_list)
reconcile(rep_change)
- PO = set_cex_po() #testing the &space
+ abc('&r %s_gsrm.aig'%f_name)
+ PO = set_cex_po(1) #testing the & space
if (-1 < PO and PO < (n_pos_before-n_pos_proved)):
print 'Found cex in original output = %d'%cex_po()
print 'Refinement time = %s'%convert(time.time() - ref_time)
@@ -1287,16 +1442,6 @@ def speculate():
return Error
refine_with_cex() #change the number of equivalences
continue
-## cex_list = reconcile_all(cex_list, rep_change) #end of pairing with reparam()
-## PO = test_all_cexs(cex_list) #we have to make sure that none of the cex's fail the original POs.
-## if 0 <= PO and PO < (n_pos_before - n_pos_proved):
-## print 'found SAT in true output = %d'%cex_po()
-## print 'Refinement time = %s'%convert(time.time() - ref_time)
-## return Sat_true
-## if PO == -1:
-## return Error
-## refine_with_cexs()#change the number of equivalences
-## continue
elif is_unsat():
print 'UNSAT'
print 'Refinement time = %s'%convert(time.time() - ref_time)
@@ -1311,22 +1456,55 @@ def speculate():
sims = sims_old
print 'UNDECIDED'
print 'Refinement time = %s'%convert(time.time() - ref_time)
- write_file('spec')
- if n_pos_before == n_pos():
- return Undecided_no_reduction
- else:
+## if last_srm_po_size == initial_po_size: #essentially nothing happened. last_srm_po_size will be # POs in last srm.
+ if initial_sizes == [n_pis(),n_pos(),n_latches(),n_ands()]:
+ return Undecided_no_reduction #thus do not write spec file
+ else: #file was changed, so some speculation happened. If we find a cex later, need to know this.
+ write_file('spec')
return Undecided_reduction
+def simple_sat(t=10000):
+ y = time.time()
+ J = [14,2,7,9,30,31,26,5] #5 is pre_simp
+ funcs = create_funcs(J,t)
+ mtds =sublist(methods,J)
+ fork_last(funcs,mtds)
+ result = get_status()
+ if result > Unsat:
+ write_file('smp')
+ result = verify(slps+[14,2,3,7,9,30,31,26],t)
+ print 'Time for simple_sat = %0.2f'%(time.time()-y)
+ return RESULT[result]
+
+def simple(t=10000):
+ y = time.time()
+## J = [14,1,2,7,9,23,30,5] #5 is pre_simp
+## funcs = create_funcs(J,t)
+## mtds =sublist(methods,J)
+## fork_last(funcs,mtds)
+## result = get_status()
+## if result > Unsat:
+## write_file('smp')
+## J = slps+bmcs+pdrs+intrps
+## J = modify_methods(J)
+## result = verify(J,t)
+ J = slps+bmcs+pdrs+intrps
+ J = modify_methods(J)
+ result = verify(J,t)
+## print 'Time for simple = %0.2f'%(time.time()-y)
+ return RESULT[result]
+
+
def simple_bip(t=1000):
y = time.time()
- J = [0,1,2,30,5] #5 is pre_simp
+ J = [14,1,2,30,5] #5 is pre_simp
funcs = create_funcs(J,t)
mtds =sublist(methods,J)
fork_last(funcs,mtds)
result = get_status()
if result > Unsat:
write_file('smp')
- result = verify(slps+[0,1,2,30],t)
+ result = verify(slps+[14,1,2,30],t)
print 'Time for simple_bip = %0.2f'%(time.time()-y)
return RESULT[result]
@@ -1369,10 +1547,6 @@ def check_cex():
run_command('testcex')
print 'cex po = %d'%cex_po()
return cex_po() >=0
-## if cex_po() == -1: # means gsrm changes after refinement - no output is asserted.
-## return False
-## else:
-## return True
def set_size():
"""Stores the problem size of the current design.
@@ -1426,19 +1600,11 @@ def quick_verify(n):
status = verify(J,t)
return status
-##def process_status():
-## if n_latches() == 0:
-## status = check_sat()
-## else:
-## status = get_status()
-## return status
-
def process_status(status):
""" if there are no FF, the problem is combinational and we still have to check if UNSAT"""
if n_latches() == 0:
return check_sat()
return status
-
def get_status():
"""this simply translates the problem status encoding done by ABC
@@ -1492,12 +1658,11 @@ def reconcile(rep_change):
# reconcile modifies cex and restores work AIG to beforerpm
abc('write_status %s_before.status;write_status tt_before.status'%f_name)
POb = set_cex_po()
-## assert POa == POb, 'cex PO afterrpm = %d, cex PO beforerpm = %d'%(POa,POb)
if POa != POb:
abc('&r %s_beforerpm.aig; &w tt_before.aig'%f_name)
abc('&r %s_afterrpm.aig; &w tt_after.aig'%f_name)
- print 'cex PO afterrpm = %d, cex PO beforerpm = %d'%(POa,POb)
-## assert POa == POb, 'cex PO afterrpm = %d, cex PO beforerpm = %d'%(POa,POb)
+ print 'cex PO afterrpm = %d not = cex PO beforerpm = %d'%(POa,POb)
+ assert POa > -1, 'cex did not assert any output'
def reconcile_all(lst, rep_change):
"""reconciles the list of cex's"""
@@ -1551,25 +1716,37 @@ def verify(J,t):
global x_factor, final_verify_time, last_verify_time, methods
set_globals()
t = int(max(1,t))
- N = bmc_depth()
- L = n_latches()
- I = n_real_inputs()
-## mtds = sublist(methods,J)
- #heuristic that if bmc went deep, then reachability might also
- if ( ((I+L<350)&(N>100)) or (I+L<260) or (L<80) ):
- J = J+reachs
- J = remove_intrps(J)
- if L < 80:
- J=J+[4]
+ J = modify_methods(J)
mtds = sublist(methods,J)
print mtds
#print J,t
F = create_funcs(J,t)
- (m,result) = fork(F,mtds) #FORK here
+ (m,result) = fork_break(F,mtds,'US') #FORK here
assert result == get_status(),'result: %d, status: %d'%(result,get_status())
return result
-
+def dsat_all(t=100,c=100000):
+ print 't=%d,c=%d'%(t,c)
+ N = n_pos()
+ abc('&get')
+ J = range(N)
+ ttt = time.time()
+ J.reverse()
+ abc('w %s_temp.aig'%f_name)
+ for j in J:
+ tt = time.time()
+ abc('r %s_temp.aig'%f_name)
+ run_command('cone -O %d; dc2; dsat -C %d'%(j,c))
+ if is_unsat():
+ print 'Output %d is %s'%(j,RESULT[2]),
+ else:
+ print 'Output %d is %s'%(j,RESULT[3]),
+ T = time.time() -tt
+ print 'time = %0.2f'%T
+ if time.time() - tt > t:
+ break
+ print 'Total time = %0.2f'%(time.time() - ttt)
+
def check_sat():
"""This is called if all the FF have disappeared, but there is still some logic left. In this case,
the remaining logic may be UNSAT, which is usually the case, but this has to be proved. The ABC command 'dsat' is used fro combinational problems"""
@@ -1580,7 +1757,11 @@ def check_sat():
abc('&get') #save the current circuit
abc('orpos;dsat -C %d'%G_C)
if is_sat():
- return Sat_true
+ abc('&put')
+ if n_pos() == 1:
+ return Sat_true
+ else:
+ return Undecided_no_reduction #some POs could be unsat.
elif is_unsat():
return Unsat
else:
@@ -1610,54 +1791,54 @@ def try_induction(C):
print 'Induction succeeded'
return Unsat
-def final_verify_recur(K):
- """During prove we make backups as we go. These backups have increasing abstractions done, which can cause
- non-verification by allowing false counterexamples. If an abstraction fails with a cex, we can back up to
- the previous design before the last abstraction and try to proceed from there. K is the backup number we
- start with and this decreases as the backups fails. For each backup, we just try final_verify.
- If ever we back up to 0, which is the backup just after simplify, we then try speculate on this. This often works
- well if the problem is a SEC problem where there are a lot of equivalences across the two designs."""
- global last_verify_time
- #print 'Proving final_verify_recur(%d)'%K
- last_verify_time = 2*last_verify_time
- print 'Verify time increased to %d'%last_verify_time
- for j in range(K):
- i = K-(j+1)
- abc('r %s_backup_%d.aig'%(initial_f_name,i))
- if ((i == 0) or (i ==2)): #don't try final verify on original last one
- status = prob_status()
- break
- print '\nVerifying backup number %d:'%i,
- #abc('r %s_backup_%d.aig'%(initial_f_name,i))
- ps()
- #J = [18,0,1,2,3,7,14]
- J = slps+sims+intrps+bmcs+pdrs
- t = last_verify_time
- status = verify(J,t)
- if status >= Unsat:
- return status
- if i > 0:
- print 'SAT returned, Running less abstract backup'
- continue
- break
- if ((i == 0) and (status > Unsat) and (n_ands() > 0)):
- print '\n***Running speculate on initial backup number %d:'%i,
- abc('r %s_backup_%d.aig'%(initial_f_name,i))
- ps()
- if n_ands() < 20000:
-## pre_simp()
- status = speculate()
- if ((status <= Unsat) or (status == Error)):
- return status
- #J = [18,0,1,2,3,7,14]
- J = slps+sims+intrps+bmcs+pdrs
- t = 2*last_verify_time
- print 'Verify time increased to %d'%last_verify_time
- status = verify(J,t)
- if status == Unsat:
- return status
- else:
- return Undecided_reduction
+##def final_verify_recur(K):
+## """During prove we make backups as we go. These backups have increasing abstractions done, which can cause
+## non-verification by allowing false counterexamples. If an abstraction fails with a cex, we can back up to
+## the previous design before the last abstraction and try to proceed from there. K is the backup number we
+## start with and this decreases as the backups fails. For each backup, we just try final_verify.
+## If ever we back up to 0, which is the backup just after simplify, we then try speculate on this. This often works
+## well if the problem is a SEC problem where there are a lot of equivalences across the two designs."""
+## global last_verify_time
+## #print 'Proving final_verify_recur(%d)'%K
+## last_verify_time = 2*last_verify_time
+## print 'Verify time increased to %d'%last_verify_time
+## for j in range(K):
+## i = K-(j+1)
+## abc('r %s_backup_%d.aig'%(initial_f_name,i))
+## if ((i == 0) or (i ==2)): #don't try final verify on original last one
+## status = prob_status()
+## break
+## print '\nVerifying backup number %d:'%i,
+## #abc('r %s_backup_%d.aig'%(initial_f_name,i))
+## ps()
+## #J = [18,0,1,2,3,7,14]
+## J = slps+sims+intrps+bmcs+pdrs
+## t = last_verify_time
+## status = verify(J,t)
+## if status >= Unsat:
+## return status
+## if i > 0:
+## print 'SAT returned, Running less abstract backup'
+## continue
+## break
+## if ((i == 0) and (status > Unsat) and (n_ands() > 0)):
+## print '\n***Running speculate on initial backup number %d:'%i,
+## abc('r %s_backup_%d.aig'%(initial_f_name,i))
+## ps()
+## if n_ands() < 20000:
+#### pre_simp()
+## status = speculate()
+## if ((status <= Unsat) or (status == Error)):
+## return status
+## #J = [18,0,1,2,3,7,14]
+## J = slps+sims+intrps+bmcs+pdrs
+## t = 2*last_verify_time
+## print 'Verify time increased to %d'%last_verify_time
+## status = verify(J,t)
+## if status == Unsat:
+## return status
+## else:
+## return Undecided_reduction
def smp():
abc('smp')
@@ -1674,7 +1855,11 @@ def trim():
reparam()
## print 'exiting trim'
-def prs():
+def prs(x=True):
+ global trim_allowed
+ """ If x is set to False, no reparameterization will be done in pre_simp"""
+ trim_allowed = x
+ print 'trim_allowed = ',trim_allowed
y = time.clock()
pre_simp()
print 'Time = %s'%convert(time.clock() - y)
@@ -1684,7 +1869,8 @@ def pre_simp():
"""This uses a set of simplification algorithms which preprocesses a design.
Includes forward retiming, quick simp, signal correspondence with constraints, trimming away
PIs, and strong simplify"""
- global trim_allowed
+ global trim_allowed, temp_dec
+ tt = time.time()
set_globals()
abc('&get; &scl; &put')
if (n_ands() > 200000 or n_latches() > 50000 or n_pis() > 40000):
@@ -1695,6 +1881,8 @@ def pre_simp():
## ps()
if n_latches() == 0:
return check_sat()
+## if n_ands()<70000:
+## abs('scorr -C 5000')
best_fwrd_min([10,11])
ps()
status = try_scorr_constr()
@@ -1712,19 +1900,67 @@ def pre_simp():
return check_sat()
if trim_allowed:
t = min(15,.3*G_T)
-## try_tempor(t)
- try_temps(15)
- if n_latches() == 0:
- return check_sat()
- try_phase()
- if n_latches() == 0:
- return check_sat()
+ if not '_smp' in f_name: #try this only once on a design
+ try_temps(15)
+ if n_latches() == 0:
+ return check_sat()
+ try_phase()
+ if n_latches() == 0:
+ return check_sat()
if ((n_ands() > 0) or (n_latches()>0)):
trim()
status = process_status(status)
+ print 'Simplification time = %0.2f'%(time.time()-tt)
+ return status
+
+def pre_simp2():
+ """This uses a set of simplification algorithms which preprocesses a design.
+ Includes forward retiming, quick simp, signal correspondence with constraints, trimming away
+ PIs, and strong simplify"""
+ global trim_allowed, temp_dec
+ tt = time.time()
+ set_globals()
+ abc('&get; &scl; &put')
+ if (n_ands() > 200000 or n_latches() > 50000 or n_pis() > 40000):
+ print 'Problem too large, simplification skipped'
+ return 'Undecided'
+ if ((n_ands() > 0) or (n_latches()>0)):
+ trim()
+## ps()
+ if n_latches() == 0:
+ return check_sat()
+ if n_ands()<70000:
+ abc('scorr -C 5000')
+ best_fwrd_min([10,11])
+ ps()
+ status = try_scorr_constr()
+ if ((n_ands() > 0) or (n_latches()>0)):
+ trim()
+ if n_latches() == 0:
+ return check_sat()
+ status = process_status(status)
if status <= Unsat:
return status
- return process_status(status)
+ simplify()
+ print 'Simplify: ',
+ ps()
+ if n_latches() == 0:
+ return check_sat()
+ if trim_allowed:
+ t = min(15,.3*G_T)
+ if not '_smp' in f_name: #try this only once on a design
+ try_temps(15)
+ if n_latches() == 0:
+ return check_sat()
+ try_phase()
+ if n_latches() == 0:
+ return check_sat()
+ if ((n_ands() > 0) or (n_latches()>0)):
+ trim()
+ status = process_status(status)
+ print 'Simplification time = %0.2f'%(time.time()-tt)
+ return status
+
def try_scorr_constr():
set_size()
@@ -1734,7 +1970,6 @@ def try_scorr_constr():
abc('r %s_savetemp.aig'%f_name)
return status
-
def factors(n):
l = [1,]
nn = n
@@ -1807,9 +2042,6 @@ def try_phase():
return
## init_simp = 0
print 'Trying phase abstraction - Max phase = %d'%n,
- #ps()
-## trim()
- #ps()
abc('w %s_phase_temp.aig'%f_name)
na = n_ands()
nl = n_latches()
@@ -1822,8 +2054,6 @@ def try_phase():
#p = choose_phase()
p = z[1]
abc('phase -F %d'%p)
-## ps()
- #print z
if no == n_pos(): #nothing happened because p is not mod period
print 'Phase %d is incompatible'%p
abc('r %s_phase_temp.aig'%f_name)
@@ -1837,7 +2067,6 @@ def try_phase():
print 'Phase %d is incompatible'%p
abc('r %s_phase_temp.aig'%f_name)
return
- #print 'Trying phase = %d: '%p,
print 'Simplifying with %d phases: => '%p,
simplify()
trim()
@@ -1882,7 +2111,6 @@ def try_phase():
try_phase()
return
abc('r %s_phase_temp.aig'%f_name)
- #ps()
return
def try_temp(t=15):
@@ -1895,11 +2123,12 @@ def try_temp(t=15):
na = n_ands()
best = [ni,nl,na]
F = create_funcs([18],t) #create a timer function
- F = F + [eval('(pyabc_split.defer(abc)("tempor -s; trm; scr; trm; tempor; trm; scr; trm"))')]
+## F = F + [eval('(pyabc_split.defer(abc)("tempor -s; trm; scr; trm; tempor; trm; scr; trm"))')]
+ F = F + [eval('(pyabc_split.defer(abc)("tempor -s; &get; &trim -o; &put; scr; &get; &trim -o; &put; tempor; &get; &trim -o; &put; scr; &get; &trim -o; &put"))')]
for i,res in pyabc_split.abc_split_all(F):
break
cost = rel_cost(best)
- print 'Cost = %0.2f'%cost
+ print 'cost = %0.2f'%cost
if cost < .01:
ps()
return
@@ -1917,64 +2146,10 @@ def try_temps(t=15):
else:
best = (n_pis(),n_latches(),n_ands())
-def try_tempor(t):
- btime = time.clock()
- trim()
- print'Trying temporal decomposition - for max %s sec. '%convert(t),
- abc('w %s_best.aig'%f_name)
- ni = n_pis()
- nl = n_latches()
- na = n_ands()
- best = [ni,nl,na]
- F = create_funcs([18],t) #create a timer function
- #Caution: trm done in the following removes POs and PIs
-## F = F + [eval('(pyabc_split.defer(abc)("tempor -s -C %d; trm; lcr; scr; trm"))'%(2*na))]
-## F = F + [eval('(pyabc_split.defer(abc)("tempor -C %d; trm; lcr; scr; trm"))'%(2*na))]
- F = F + [eval('(pyabc_split.defer(abc)("tempor -s; trm; lcr; scr; trm"))')]
- F = F + [eval('(pyabc_split.defer(abc)("tempor -C; trm; lcr; scr; trm"))')]
- n_done = 0
- new_best = 0
-## debug_here()
- for i,res in pyabc_split.abc_split_all(F):
- if i == 0:
- break
- else:
- cost = rel_cost(best)
- print 'Cost = %0.2f'%cost
- if cost < .01:
- abc('w %s_best.aig'%f_name)
- best = [n_pis(),n_latches(),n_ands()]
- new_best = 1
- n_done = n_done+1
- if n_done == 2:
- break
- else:
- continue
- abc('r %s_best.aig'%f_name)
- ps()
- if new_best == 0: #nothing new
- print 'No reduction or timeout occurred'
- return
- if n_latches() == 0:
- return
- abc('scr;smp')
- trim()
- cost = rel_cost_t([ni,nl,na]) #see how much it improved
-## print 'rel_cost_t = %0.2f'%cost
- if (cost < -.01):
- print 'time = %f: '%(time.clock() - btime),
- if cost < -1:
- print 'Trying second tempor'
- try_tempor(t)
- print 'Reduction: time=%f'%(time.clock() - btime)
- return
- else:
- print 'No reduction'
- return
def rel_cost_t(J):
""" weighted relative costs versus previous stats."""
- if n_latches() == 0:
+ if (n_latches() == 0 and J[1]>0):
return -10
nli = J[0]+J[1]
na = J[2]
@@ -1984,16 +2159,13 @@ def rel_cost_t(J):
#ri = (float(nri)-float(ni))/float(ni)
rli = (float(n_latches()+nri)-float(nli))/float(nli)
ra = (float(n_ands())-float(na))/float(na)
-## cost = 10*rli + .1*ra
cost = 10*rli + .5*ra
-## print 'cost = %0.2f'%c
-## print 'cost = %0.2f'%cost
return cost
def rel_cost(J):
""" weighted relative costs versus previous stats."""
global f_name
- if n_latches() == 0:
+ if (n_latches() == 0 and J[1]>0):
return -10
nri = n_real_inputs()
ni = J[0]
@@ -2004,7 +2176,7 @@ def rel_cost(J):
ri = (float(nri)-float(ni))/float(ni)
rl = (float(n_latches())-float(nl))/float(nl)
ra = (float(n_ands())-float(na))/float(na)
- cost = 1*ri + 10*rl + .1*ra
+ cost = 1*ri + 5*rl + .2*ra
## print 'Relative cost = %0.2f'%cost
return cost
@@ -2012,7 +2184,6 @@ def best_fwrd_min(J):
global f_name, methods
mtds = sublist(methods,J)
F = create_funcs(J,0)
- #print 'Trying forward retiming: running %s in parallel'%(mtds)
(m,result) = fork_best(F,mtds) #FORK here
print '%s: '%mtds[m],
@@ -2043,26 +2214,38 @@ def quick_simp():
"""A few quick ways to simplify a problem before more expensive methods are applied.
Uses & commands if problem is large. These commands use the new circuit based SAT solver"""
na = n_ands()
- if na < 30000:
+ if na < 60000:
abc('scl -m;lcorr;drw')
else:
abc('&get;&scl;&lcorr;&put;drw')
+ print 'Using quick simplification',
+ status = process_status(get_status())
+ if status <= Unsat:
+ result = RESULT[status]
+ else:
+ ps()
+## write_file('smp')
+#### K_backup = K = K+1
+ result = 'UNDECIDED'
+ return result
def scorr_constr():
"""Extracts implicit constraints and uses them in signal correspondence
Constraints that are found are folded back when done"""
-##################### Temporary
-# return Undecided_no_reduction
-#####################
na = max(1,n_ands())
n_pos_before = n_pos()
if ((na > 40000) or n_pos()>1):
return Undecided_no_reduction
abc('w %s_savetemp.aig'%f_name)
- if n_ands() < 3000:
- cmd = 'unfold -a -F 2'
+ na = max(1,n_ands())
+## f = 1
+ f = 18000/na #**** THIS can create a bug 10/15/11
+ f = min(f,4)
+ f = max(1,f)
+ if n_ands() > 18000:
+ cmd = 'unfold -s -F 2'
else:
- cmd = 'unfold'
+ cmd = 'unfold -F %d -C 5000'%f
abc(cmd)
if n_pos() == n_pos_before:
print 'No constraints found'
@@ -2070,16 +2253,13 @@ def scorr_constr():
if (n_ands() > na): #no constraints found
abc('r %s_savetemp.aig'%f_name)
return Undecided_no_reduction
- #print_circuit_stats()
na = max(1,n_ands())
-## f = 1
- f = 18000/na
- f = min(f,4)
- f = max(1,f)
- print 'Number of constraints = %d, frames = %d'%((n_pos() - n_pos_before),f)
+ f = 1
+ print 'Number of constraints = %d'%((n_pos() - n_pos_before))
abc('scorr -c -F %d'%f)
abc('fold')
trim()
+ print 'Constrained simplification: ',
ps()
return Undecided_no_reduction
@@ -2105,299 +2285,144 @@ def input_x_factor():
xfi = x_factor = input()
print 'x_factor set to %f'%x_factor
-##def prove_sec():
-## """
-## Like 'prove' proves all the outputs together. Speculation is done first
-## If undecided, the do super_prove.
-## """
-## global x_factor,xfi,f_name, last_verify_time,K_backup, sec_options
-## max_bmc = -1
-## K_backup = K = 0
-## result = prove_part_1(K) #initial simplification here
-## if n_latches() == 0:
-## return 1,result
-## K = K_backup
-## if ((result == 'SAT') or (result == 'UNSAT')):
-## return 1,result
-## assert K==1, 'K = %d'%K
-## result = prove_part_3(K) #speculation done here
-## if ((result == 'SAT') or (result == 'UNSAT')):
-## return 1,result
-## else:
-## return 1,super_prove(0)
-##
-## #################### End of ss
-## K = K_backup
-## #print 'after speculate'
-## status = get_status()
-## assert 0<K and K<4, 'K = %d'%K
-## if K > 1: # for K = 1, we will leave final verification for later
-## print 'Entering final_verify_recur(%d) from prove()'%K
-## status = final_verify_recur(K) # will start verifying with final verify
-## #starting at backup number K-1 (either K = 2 or 3 here
-## #1 if spec found true sat on abs, 2 can happen if abstraction
-## #did not work but speculation worked,
-## #3 if still undecided after spec)
-## else: #K=1 means that abstraction did not work and was proved wrong by speculation
-## if a == 0:
-## result = prove_spec_first()
-## if ((result == 'SAT') or (result == 'UNSAT')):
-## return 1,result
-## write_file('final')
-## return (not K == 1),RESULT[status]
def prove(a):
"""Proves all the outputs together. If ever an abstraction
was done then if SAT is returned,
we make RESULT return "undecided".
- If a == 1 skip speculate. K is the number of the next backup
- if a == 2 skip initial simplify and speculate"""
- global x_factor,xfi,f_name, last_verify_time,K_backup, t_init, sec_options
+    If a == 0 do smp and abs first
+ If a == 1 do smp and spec first
+ if a == 2 do quick simplification instead of full simplification, then abs first, spec second"""
+ global x_factor,xfi,f_name, last_verify_time,K_backup, t_init, sec_options, spec_found_cex
spec_first = False
max_bmc = -1
- K_backup = K = 0
- if a == 2: #skip initial simplification
- print 'Using quick simplification',
- abc('lcorr;drw')
- status = process_status(get_status())
- if status <= Unsat:
- result = RESULT[status]
- else:
- ps()
- write_file('smp')
- abc('w %s_backup_%d.aig'%(initial_f_name,K)) #writing backup 0
- K_backup = K = K+1
- result = 'UNDECIDED'
- else:
- result = prove_part_1(K) #initial simplification here
+ abs_found_cex_after_spec = spec_found_cex_after_abs = False
+ if not '_smp' in f_name: #if already simplified, then don't do again
+ if a == 2 : #do quick simplification
+ result = quick_simp() #does not write 'smp' file
+## print result
+ else :
+ result = prove_part_1() #do full simplification here
if ((result == 'SAT') or (result == 'UNSAT')):
- return 1,result
- if n_latches() == 0:
- return 1,result
- if a == 0:
- spec_first = False
-## spec_first = check_if_spec_first()
- if spec_first:
- sec_options = 'g'
- print 'sec_options set to "g"'
- if n_latches() == 0:
- return 1,result
- K = K_backup
- if ((result == 'SAT') or (result == 'UNSAT')):
- return 1,result
- assert K==1, 'K = %d'%K
+ return result
+ if a == 1:
+ spec_first = True
t_init = 2
- if spec_first and a == 0:
- result = prove_part_3(K)
+ abs_found_cex_before_spec = spec_found_cex_before_abs = False
+## First phase
+ if spec_first:
+ result = prove_part_3() #speculation done here first
else:
- result = prove_part_2(K) #abstraction done here
- K = K_backup
+ result = prove_part_2() #abstraction done here first
if ((result == 'SAT') or (result == 'UNSAT')):
- return 1,result
- assert 0<K and K<3, 'K = %d'%K
- if a == 0:
+ return result
+## Second phase
+ if spec_first: #did spec already in first phase
t_init = 2
- if spec_first:
- result = prove_part_2(K) #speculation 2one here
- else:
- result = prove_part_3(K)
- if ((result == 'SAT') or (result == 'UNSAT')):
- return 1,result
- K = K_backup
- #print 'after speculate'
+ result = prove_part_2() #abstraction done here second
+ if result == 'SAT':
+ abs_found_cex_after_spec = True
+ else:
+ result = prove_part_3() #speculation done here second
+ if result == 'SAT':
+ spec_found_cex_after_abs = True
+ if result == 'UNSAT':
+ return result
status = get_status()
- assert 0<K and K<4, 'K = %d'%K
- if (((K > 2) and (n_pos()>1)) or ((K == 2) and spec_first)): # for K = 1, we will leave final verification for later
- print 'Entering final_verify_recur(%d) from prove()'%K
- status = final_verify_recur(K) # will start verifying with final verify
- #starting at backup number K-1 (either K = 2 or 3 here
- #1 if spec found true sat on abs, 2 can happen if abstraction
- #did not work but speculation worked,
- #3 if still undecided after spec)
- #K=1 or 2 and not spec_first means that abstraction did not work and was proved wrong by speculation
- elif ((a == 0) and K == 1):
- t_init = 2
- result = prove_spec_first()
+ if result == 'ERROR':
+ status = Error
+ if ('_abs' in f_name and spec_found_cex_after_abs): #spec file should not have been written in speculate
+ f_name = revert(f_name,1) #it should be as if we never did abstraction.
+ print 'f_name = %s'%f_name
+ abc('r %s.aig'%f_name) #restore previous
+ t_init = 2
+ if not '_rev' in f_name:
+ print 'proving speculation first'
+ write_file('rev') #maybe can get by with just changing f_name
+ print 'f_name = %s'%f_name
+ result = prove(1) #1 here means do smp and then spec
if ((result == 'SAT') or (result == 'UNSAT')):
- return 1,result
- write_file('final')
- return (not K == 1),RESULT[status]
-
-def psf():
- x = time.time()
- result = prove_spec_first()
- print 'Total clock time for %s = %f sec.'%(init_initial_f_name,(time.time() - x))
- return result
-
-def prove_spec_first():
- """Proves all the outputs together. If ever an abstraction
- was done then if SAT is returned,
- we make RESULT return "undecided".
- """
- global x_factor,xfi,f_name, last_verify_time,K_backup
- max_bmc = -1
- K_backup = K = 1
-## result = prove_part_1(K) #initial simplification here
-## if n_latches() == 0:
-## return result
-## K = K_backup
-## if ((result == 'SAT') or (result == 'UNSAT')):
-## return result
-## assumes that initial simplification has been done already.
- assert K==1, 'K = %d'%K
- result = prove_part_3(K) #speculation done here
- K = K_backup
- if ((result == 'SAT') or (result == 'UNSAT')):
- return result
- assert 0<K and K<3, 'K = %d'%K
- K = K_backup #K = 1 => speculation did not do anything
- if K == 1: # so don't try abstraction because it did not work the first time
+ return result
+ elif ('_spec' in f_name and abs_found_cex_after_spec): #abs file should not have been written in abstract
+ f_name = revert(f_name,1) #it should be as if we never did speculation.
+ abc('r %s.aig'%f_name) #restore previous
+ t_init = 2
+ if not '_rev' in f_name:
+ print 'proving abstraction first'
+ write_file('rev') #maybe can get by with just changing f_name
+ result = prove(0)
+ if ((result == 'SAT') or (result == 'UNSAT')):
+ return result
+ else:
return 'UNDECIDED'
- result = prove_part_2(K) #abstraction done here
- if result == 'UNSAT':
- return result
- if result == 'SAT': # abstraction proved speculation wrong.
- K = 2
- assert 0<K and K<4, 'K = %d'%K
- if K > 1: # for K = 1, we will leave final verification for later
- print 'Entering final_verify_recur(%d) from prove()'%K
- status = final_verify_recur(K) # will start verifying with final verify
- #starting at backup number K-1 (either K = 2 or 3 here
- #1 if spec found true sat on abs, 2 can happen if
- #speculation worked but abstraction proved it wrong,
- #3 if still undecided after spec and abstraction)
- write_file('final')
- return RESULT[status]
-
-def prove_part_1(K):
+def prove_part_1():
global x_factor,xfi,f_name, last_verify_time,K_backup
- #K=0
print 'Initial: ',
- print_circuit_stats()
+ ps()
x_factor = xfi
- print 'x_factor = %f'%x_factor
print '\n***Running pre_simp'
set_globals()
if n_latches() > 0:
-## status = pre_simp()
- set_globals()
- t = 1000
- funcs = [eval('(pyabc_split.defer(pre_simp)())')]
-## J = sims+pdrs+bmcs+intrps
- J = pdrs+bmcs+bestintrps
- funcs = create_funcs(J,t)+ funcs #important that pre_simp goes last
- mtds =sublist(methods,J) + ['pre_simplify']
- fork_last(funcs,mtds)
-## funcs = [eval(FUNCS[3])] + [eval(FUNCS[0])] + [eval(FUNCS[1])] + [eval(FUNCS[2])] + [eval(FUNCS[9])] + [eval(FUNCS[7])] + funcs
-## fork_last(funcs,['SIM', 'PDR','INTRP','BMC', 'BMC3', 'PDRm','pre_simp'])
- status = get_status()
+ status = run_par_simplify()
else:
status = check_sat()
if ((status <= Unsat) or (n_latches() == 0)):
return RESULT[status]
- if n_ands() == 0:
- abc('&get;,bmc -vt=2')
- if is_sat():
- return 'SAT'
trim()
- write_file('smp')
- abc('w %s_backup_%d.aig'%(initial_f_name,K)) #writing backup 0
- K_backup = K = K+1
- #K=1
+ write_file('smp') #need to check that this was not written in pre_simp
set_globals()
- return 'UNDECIDED'
+ return RESULT[status]
-def prove_part_2(K):
+def run_par_simplify():
+ set_globals()
+ t = 1000
+ funcs = [eval('(pyabc_split.defer(pre_simp)())')]
+ J = pdrs+bmcs+intrps
+ J = modify_methods(J,1)
+ funcs = create_funcs(J,t)+ funcs #important that pre_simp goes last
+ mtds =sublist(methods,J) + ['pre_simp']
+ fork_last(funcs,mtds)
+ status = get_status()
+ return status
+
+def prove_part_2(ratio=.75):
"""does the abstraction part of prove"""
- global x_factor,xfi,f_name, last_verify_time,K_backup, trim_allowed
+ global x_factor,xfi,f_name, last_verify_time,K_backup, trim_allowed,ifbip
print'\n***Running abstract'
- nl_b = n_latches()
- status = abstract() #ABSTRACTION done here
-## write_file('abs')
-## print 'Abstract finished'
- if status == Undecided_no_reduction:
- K_backup = K = K-1 #K = 0
- if status == Unsat:
- write_file('abs')
- return RESULT[status]
-## trim()
- #just added in
+## print 'ifbip = %d'%ifbip
+ status = abstract(ifbip) #ABSTRACTION done here
+ status = process_status(status)
+ print 'abstract done, status is %d'%status
+ result = RESULT[status]
if status < Unsat:
- write_file('abs')
print 'CEX in frame %d'%cex_frame()
- return RESULT[status]
- if K > 0:
- t_old = trim_allowed
- if pord_on:
- trim_allowed = False
- pre_simp()
- trime_allowed = t_old
- #end of added in
- write_file('abs')
- status = process_status(status)
- if ((status <= Unsat) or status == Error):
- if status < Unsat:
- print 'CEX in frame %d'%cex_frame()
- return RESULT[status]
- return RESULT[status]
- abc('w %s_backup_%d.aig'%(initial_f_name,K)) # writing backup 1 (or 0) after abstraction
- K_backup = K = K+1
- #K = 1 or 2 here
- return 'UNDECIDED'
+ return result #if we found a cex we do not want to trim.
+ trim()
+ return result
-def prove_part_3(K):
+def prove_part_3():
"""does the speculation part of prove"""
- global x_factor,xfi,f_name, last_verify_time,K_backup, init_initial_f_name
+ global x_factor,xfi,f_name, last_verify_time,init_initial_f_name
global max_bmc, sec_options
- #K_backup = K = K +1 #K = 1 or 2 K =1 means that abstraction did not reduce
- assert 0 < K and K < 3, 'K = %d'%K
- if ((n_ands() > 10000) and sec_options == ''):
+ if ((n_ands() > 36000) and sec_options == ''):
sec_options = 'g'
print 'sec_options set to "g"'
- if n_ands() == 0:
- print 'Speculation skipped because no AND nodes'
- else:
- print '\n***Running speculate'
-## pre_simp()
- status = speculate() #SPECULATION done here
- if status == Unsat:
- return RESULT[status]
- old_f_name = f_name
-## if not status < Unsat:
-## pre_simp()
-## write_file('spec1')
-## #we do not do the continuation of speculation right now so the following is not needed.
-## #abc('&r %s_gore.aig;&w %s_gore.aig'%(old_f_name,f_name)) #very subtle -needed for continuing spec refinement
- status = process_status(status)
- if status == Unsat:
- return RESULT[status]
- elif ((status < Unsat) or (status == Error)):
- print 'CEX in frame %d'%cex_frame()
- if K == 1: #if K = 1 then abstraction was not done.
- print 'speculate found cex on original'
- return RESULT[status] # speculate found cex on original
- K_backup = K = K-1 #since spec found a true cex, then result of abstract was wrong
- print 'cex means that abstraction was invalid'
- print 'Initial simplified AIG restored => ',
- abc('r %s_smp.aig'%init_initial_f_name)
- max_bmc = -1
- ps()
- assert K == 1, 'K = %d'%K
- else:
- trim()
- print 'Problem still undecided'
- abc('w %s_backup_%d.aig'%(initial_f_name,K)) # writing backup 2 or 1
- # 2 after speculation and abstraction
- # 1 if abstraction did not reduce
- K_backup = K = K+1
- assert 1<K and K<4, 'K = %d'%K
-## write_file('spec2')
- trim()
- return 'UNDECIDED'
+ print '\n***Running speculate'
+ status = speculate() #SPECULATION done here
+ status = process_status(status)
+ print 'speculate done, status is %d'%status
+ result = RESULT[status]
+ if status < Unsat:
+ print 'CEX in frame %d'%cex_frame()
+ return result
+    trim() #if cex is found we do not want to trim.
+ return result
def prove_all(dir,t):
- """Prove all aig files in this directory using super_prove and record the results in results.txt"""
+ """Prove all aig files in this directory using super_prove and record the results in results.txt
+ Not called from any subroutine
+ """
## t = 1000 #This is the timeoout value
xtime = time.time()
## dir = main.list_aig('')
@@ -2423,150 +2448,7 @@ def prove_all(dir,t):
xtime = tt
## print results
f.close()
- return results
-
-
-def prove_g_pos(a):
- """Proves the outputs clustered by a parameter a.
- a is the disallowed increase in latch support Clusters must be contiguous
- If a = 0 then outputs are proved individually. Clustering is done from last to first
- Output 0 is attempted to be proved inductively using other outputs as constraints.
- Proved outputs are removed if all the outputs have not been proved.
- If ever one of the proofs returns SAT, we stop and do not try any other outputs."""
- global f_name, max_bmc,x_factor,x
- x = time.time()
- #input_x_factor()
- init_f_name = f_name
- print 'Beginning prove_g_pos'
- prove_all_ind()
- print 'Number of outputs reduced to %d by fast induction with constraints'%n_pos()
- print '\n****Running second level prove****************\n'
- reparam()
-## try_rpm()
-## k,result = prove(1) # 1 here means do not try speculate.
-## if result == 'UNSAT':
-## print 'Second prove returned UNSAT'
-## return result
-## if result == 'SAT':
-## print 'CEX found'
-## return result
- print '\n********** Proving each output separately ************'
- #prove_all_ind()
- #print 'Number of outputs reduced to %d by fast induction with constraints'%n_pos()
- f_name = init_f_name
- abc('w %s_osavetemp.aig'%f_name)
- n = n_pos()
- print 'Number of outputs = %d'%n
- #count = 0
- #Now prove each remaining output separately.
- pos_proved = []
- J = 0
- jnext = n-1
- while jnext >= 0:
- max_bmc = -1
- f_name = init_f_name
- abc('r %s_osavetemp.aig'%f_name)
- #Do in reverse order
- jnext_old = jnext
- if a == 0: # do not group
- extract(jnext,jnext)
- jnext = jnext -1
- else:
- jnext = group(a,jnext)
- if jnext_old > jnext+1:
- print '\nProving outputs [%d-%d]'%(jnext + 1,jnext_old)
- else:
- print '\nProving output %d'%(jnext_old)
- #ps()
- f_name = f_name + '_%d'%jnext_old
- result = prove_1()
- if result == 'UNSAT':
- if jnext_old > jnext+1:
- print '******** PROVED OUTPUTS [%d-%d] ******** '%(jnext+1,jnext_old)
- else:
- print '******** PROVED OUTPUT %d ******** '%(jnext_old)
- pos_proved = pos_proved + range(jnext +1,jnext_old+1)
- continue
- if result == 'SAT':
- print 'One of output in (%d to %d) is SAT'%(jnext + 1,jnext_old)
- return result
- else:
- print '******** UNDECIDED on OUTPUTS %d thru %d ******** '%(jnext+1,jnext_old)
- f_name = init_f_name
- abc('r %s_osavetemp.aig'%f_name)
- if not len(pos_proved) == n:
- print 'Eliminating %d proved outputs'%(len(pos_proved))
- remove(pos_proved)
- trim()
- write_file('group')
- result = 'UNDECIDED'
- else:
- print 'Proved all outputs. The problem is proved UNSAT'
- result = 'UNSAT'
- print 'Total clock time for prove_g_pos = %f sec.'%(time.time() - x)
- return result
-
-def prove_pos(i):
- """
- i=1 means to execute prove_all_ind first
- Proved outputs are removed if all the outputs have not been proved.
- If ever one of the proofs returns SAT, we continue and try to resolve other outputs."""
- global f_name, max_bmc,x_factor,x
- x = time.time()
- #input_x_factor()
- init_f_name = f_name
- print 'Beginning prove_pos'
- remove_0_pos()
- if i:
- prove_all_ind()
- print 'Number of outputs reduced to %d by quick induction with constraints'%n_pos()
- print '********** Proving each output separately ************'
- f_name = init_f_name
- abc('w %s_osavetemp.aig'%f_name)
- n = n_pos()
- print 'Number of outputs = %d'%n
- pos_proved = []
- pos_disproved = []
- J = 0
- jnext = n-1
- while jnext >= 0:
- max_bmc = -1
- f_name = init_f_name
- abc('r %s_osavetemp.aig'%f_name)
- #Do in reverse order
- jnext_old = jnext
- extract(jnext,jnext)
- jnext = jnext -1
- print '\nProving output %d'%(jnext_old)
- f_name = f_name + '_%d'%jnext_old
-## result = prove_1()
- result = super_prove(2) #do not do initial simplification
- if result == 'UNSAT':
- print '******** PROVED OUTPUT %d ******** '%(jnext_old)
- pos_proved = pos_proved + range(jnext +1,jnext_old+1)
- continue
- if result == 'SAT':
- print '******** DISPROVED OUTPUT %d ******** '%(jnext_old)
- pos_disproved = pos_disproved + range(jnext +1,jnext_old+1)
- continue
- else:
- print '******** UNDECIDED on OUTPUT %d ******** '%(jnext_old)
- f_name = init_f_name
- abc('r %s_osavetemp.aig'%f_name)
- list = pos_proved + pos_disproved
- print 'Proved the following outputs: %s'%pos_proved
- print 'Disproved the following outputs: %s'%pos_disproved
- if not len(list) == n:
- print 'Eliminating %d resolved outputs'%(len(list))
- remove(list)
- trim()
- write_file('group')
- result = 'UNRESOLVED'
- else:
- print 'Proved or disproved all outputs. The problem is proved RESOLVED'
- result = 'RESOLVED'
- print 'Total clock time for prove_pos = %f sec.'%(time.time() - x)
- return result
+ return results
def remove_pos(lst):
"""Takes a list of pairs where the first part of a pair is the PO number and
@@ -2583,8 +2465,6 @@ def remove_pos(lst):
if jj[1] > 2:
unresolved = unresolved +[jj[0]]
print '%d outputs proved'%len(proved)
-## print disproved
-## abc('w xxx__yyy.aig')
if not proved == []:
if ((max(proved)>n_pos()-1) or min(proved)< 0):
print proved
@@ -2719,7 +2599,7 @@ def prove_pos_par0(n):
result = result + prove_n_par(N-j,j)
abc('r %s__xsavetemp.aig'%initial_f_name)
ps()
- print result
+## print result
remove_pos(result)
write_file('group')
return
@@ -2749,291 +2629,291 @@ def distribute(N,div):
result = result + [n]
return result
-def find_cex_par(tt):
- """prove n outputs at once and quit at first cex. Otherwise if no cex found return aig
- with the unproved outputs"""
- global trim_allowed,last_winner, last_cex, n_pos_before, t_init, j_last, sweep_time
- b_time = time.time() #Wall clock time
- n = n_pos()
- remove_0_pos()
- N = n_pos()
- full_time = all_proc = False
- print 'Number of POs: %d => %d'%(n,N)
- if N == 0:
- return Unsat
-## inc = 5 #******* increment for grouping for sweep set here *************
-## inc = min(12,max(inc, int(.1*N)))
- inc = 1+N/100
-## if N <1.5*inc: # if near the increment for grouping try to get it below.
-## prove_all_ind()
-## N = n_pos()
- if inc == 1:
- prove_all_ind()
- N = n_pos()
- T = int(tt) #this is the total time to be taken in final verification run before quitting speculation
-## if inc == 10:
-## t_init = 10
-## t = max(t_init/2,T/20)
-## if N <= inc:
-## t = T
-## print "inc = %d, Sweep time = %s, j_group = %d"%(inc,convert(t),j_last)
- t = sweep_time/2 #start sweeping at last time where cex was found.
-## it used to be t = 1 here but it did not make sense although seemed to work.
-## inc = 2
- while True: #poor man's concurrency
- N = n_pos()
- if N == 0:
- return Unsat
- #sweep_time controls so that when sweep starts after a cex, it starts at the last sweep time
- t = max(2,2*t) #double sweep time
- if t > .75*T:
- t = T
- full_time = True
- if ((N <= inc) or (N < 13)):
- t = sweep_time = T
- full_time = True
- inc = 1
-## sweep_time = 2*sweep_time
- if not t == T:
- t= sweep_time = max(t,sweep_time)
-## t = sweep_time
-##new heuristic
- if (all_proc and sweep_time > 8): #stop poor man's concurrency and jump to full time.
- t = sweep_time = T
- full_time - True #this might be used to stop speculation when t = T and the last sweep
-## found no cex and we do not prove Unsat on an output
- abc('w %s__ysavetemp.aig'%f_name)
- ps()
- if N < 50:
- inc = 1
- print "inc = %d, Sweep time = %s, j_last = %d"%(inc,convert(t),j_last)
- F = []
-## G = []
- #make new lambda functions since after the last pass some of the functions may have been proved and eliminated.
- for i in range(N):
- F = F + [eval('(pyabc_split.defer(verify_only)(%d,%s))'%(i,convert(T)))] #make time large and let sleep timer control timeouts
-## G = G + [range(i,i+1)]
- ######
- result = []
- outcome = ''
- N = len(F)
- rng = range(1+(N-1)/inc)
- rng = rng[j_last:]+rng[:j_last] #pick up in range where last found cex.
-## print 'rng = ',
-## print rng
- k = -1
- bb_time = time.time()
- for j in rng:
- k = k+1 #keeps track of how many groups we have processed.
- j_last = j
- J = j*inc
- JJ = J+inc
- JJ = min(N,JJ)
- if J == JJ-1:
- print 'Function = %d '%J,
- else:
- print 'Functions = [%d,%d]'%(J,JJ-1)
- Fj = create_funcs([18],t+1) #create timer function as i = 0 Here is the timer
- Fj = Fj + F[J:JJ]
- count = 0
- fj_time = time.time()
- abc('r %s__ysavetemp.aig'%f_name) #important need to restore aig here so the F refers to right thing when doing verify_only.
-## # because verify_only changes the aig.
-## ps()
- for i,res in pyabc_split.abc_split_all(Fj):
- count = count+1
- Ji = J+i-1 #gives output number
- if ((res == 0) or (res == 1)):
- abc('read_status %d.status'%Ji)
- res = get_status()
- outcome = 'CEX: Frame = %d, PO = %d, Time = %s'%(cex_frame(),Ji,convert((time.time() - fj_time)))
- break
- if i == 0: #sleep timer expired
- outcome = '*** Time expired in %s sec. Next group = %d to %d ***'%(convert(time.time() - fj_time),JJ,min(N,JJ+inc))
- break
- elif res == None: #this should not happen
- print res
- print Ji,RESULT[res],
- else: # output Ji was proved
- result = result + [[Ji,res]]
- if count >= inc:
- outcome = '--- all group processed without cex ---'
- all_proc = True
- break
- continue #this can only happen if inc > 1
- # end of for i loop
- if ((res < Unsat) and (not res == None)):
- break
- else:
- continue # continue j loop
- #end of for j loop
- if k < len(rng):
- t_init = t/2 #next time start with this time.
- else:
- j_last = j_last+1 #this was last j and we did not find cex, so start at next group
- print outcome + ' => ' ,
- if ((res < Unsat) and (not res == None)):
- t_init = t/2
- abc('read_status %d.status'%Ji) #make sure we got the right status file.
- #actually if doing abstraction we could remove proved outputs now, but we do not. -**inefficiency**
- return res
- else: #This implies that no outputs were disproved. Thus can remove proved outputs.
- abc('r %s__ysavetemp.aig'%f_name) #restore original aig
- if not result == []:
- res = []
- for j in range(len(result)):
- k = result[j]
- if k[1] == 2:
- res = res + [k[0]]
-## print res
-## result = mapp(res,G)
- result = res
-## print result
- remove(result) #remove the outputs that were proved UNSAT.
- #This is OK for both abstract and speculate
- print 'Number of POs reduced to %d'%n_pos()
- if n_pos() == 0:
- return Unsat
- if t>=T:
- return Undecided
- else:
- continue
- return Undecided
-
-def remap_pos():
- """ maintains a map of current outputs to original outputs"""
- global po_map
- k = j = 0
- new = []
- assert n_pos() == len(po_map), 'length of po_map, %d, and current # POs, %d, don"t agree'%(len(po_map),n_pos())
- for j in range(len(po_map)):
- N = n_pos()
- abc('removepo -N %d'%k) # this removes the output if it is 0 driven
- if n_pos() == N:
- new = new + [po_map[j]]
- k = k+1
- if len(new) < len(po_map):
-## print 'New map = ',
-## print new
- po_map = new
-
-def prove_mapped():
- """
- assumes that srm is in workspace and takes the unsolved outputs and proves
- them by using proved outputs as constraints.
- """
- global po_map
-## print po_map
- po_map.sort() #make sure mapped outputs are in order
- for j in po_map: #put unsolved outputs first
- run_command('swappos -N %d'%j)
- print j
- N = n_pos()
- assert N > len(po_map), 'n_pos = %d, len(po_map) = %d'%(N, len(po_map))
- run_command('constr -N %d'%(N-len(po_map))) #make the other outputs constraints
- run_command('fold') #fold constraints into remaining outputs.
- ps()
- prove_all_mtds(100)
+####def find_cex_par(tt):
+#### """prove n outputs at once and quit at first cex. Otherwise if no cex found return aig
+#### with the unproved outputs"""
+#### global trim_allowed,last_winner, last_cex, n_pos_before, t_init, j_last, sweep_time
+#### b_time = time.time() #Wall clock time
+#### n = n_pos()
+#### l=remove_const_pos()
+#### N = n_pos()
+#### full_time = all_proc = False
+#### print 'Number of POs: %d => %d'%(n,N)
+#### if N == 0:
+#### return Unsat
+###### inc = 5 #******* increment for grouping for sweep set here *************
+###### inc = min(12,max(inc, int(.1*N)))
+#### inc = 1+N/100
+###### if N <1.5*inc: # if near the increment for grouping try to get it below.
+###### prove_all_ind()
+###### N = n_pos()
+#### if inc == 1:
+#### prove_all_ind()
+#### N = n_pos()
+#### T = int(tt) #this is the total time to be taken in final verification run before quitting speculation
+###### if inc == 10:
+###### t_init = 10
+###### t = max(t_init/2,T/20)
+###### if N <= inc:
+###### t = T
+###### print "inc = %d, Sweep time = %s, j_group = %d"%(inc,convert(t),j_last)
+#### t = sweep_time/2 #start sweeping at last time where cex was found.
+###### it used to be t = 1 here but it did not make sense although seemed to work.
+###### inc = 2
+#### while True: #poor man's concurrency
+#### N = n_pos()
+#### if N == 0:
+#### return Unsat
+#### #sweep_time controls so that when sweep starts after a cex, it starts at the last sweep time
+#### t = max(2,2*t) #double sweep time
+#### if t > .75*T:
+#### t = T
+#### full_time = True
+#### if ((N <= inc) or (N < 13)):
+#### t = sweep_time = T
+#### full_time = True
+#### inc = 1
+###### sweep_time = 2*sweep_time
+#### if not t == T:
+#### t= sweep_time = max(t,sweep_time)
+###### t = sweep_time
+######new heuristic
+#### if (all_proc and sweep_time > 8): #stop poor man's concurrency and jump to full time.
+#### t = sweep_time = T
+#### full_time - True #this might be used to stop speculation when t = T and the last sweep
+###### found no cex and we do not prove Unsat on an output
+#### abc('w %s__ysavetemp.aig'%f_name)
+#### ps()
+#### if N < 50:
+#### inc = 1
+#### print "inc = %d, Sweep time = %s, j_last = %d"%(inc,convert(t),j_last)
+#### F = []
+###### G = []
+#### #make new lambda functions since after the last pass some of the functions may have been proved and eliminated.
+#### for i in range(N):
+#### F = F + [eval('(pyabc_split.defer(verify_only)(%d,%s))'%(i,convert(T)))] #make time large and let sleep timer control timeouts
+###### G = G + [range(i,i+1)]
+#### ######
+#### result = []
+#### outcome = ''
+#### N = len(F)
+#### rng = range(1+(N-1)/inc)
+#### rng = rng[j_last:]+rng[:j_last] #pick up in range where last found cex.
+###### print 'rng = ',
+###### print rng
+#### k = -1
+#### bb_time = time.time()
+#### for j in rng:
+#### k = k+1 #keeps track of how many groups we have processed.
+#### j_last = j
+#### J = j*inc
+#### JJ = J+inc
+#### JJ = min(N,JJ)
+#### if J == JJ-1:
+#### print 'Function = %d '%J,
+#### else:
+#### print 'Functions = [%d,%d]'%(J,JJ-1)
+#### Fj = create_funcs([18],t+1) #create timer function as i = 0 Here is the timer
+#### Fj = Fj + F[J:JJ]
+#### count = 0
+#### fj_time = time.time()
+#### abc('r %s__ysavetemp.aig'%f_name) #important need to restore aig here so the F refers to right thing when doing verify_only.
+###### # because verify_only changes the aig.
+###### ps()
+#### for i,res in pyabc_split.abc_split_all(Fj):
+#### count = count+1
+#### Ji = J+i-1 #gives output number
+#### if ((res == 0) or (res == 1)):
+#### abc('read_status %d.status'%Ji)
+#### res = get_status()
+#### outcome = 'CEX: Frame = %d, PO = %d, Time = %s'%(cex_frame(),Ji,convert((time.time() - fj_time)))
+#### break
+#### if i == 0: #sleep timer expired
+#### outcome = '*** Time expired in %s sec. Next group = %d to %d ***'%(convert(time.time() - fj_time),JJ,min(N,JJ+inc))
+#### break
+#### elif res == None: #this should not happen
+#### print res
+#### print Ji,RESULT[res],
+#### else: # output Ji was proved
+#### result = result + [[Ji,res]]
+#### if count >= inc:
+#### outcome = '--- all group processed without cex ---'
+#### all_proc = True
+#### break
+#### continue #this can only happen if inc > 1
+#### # end of for i loop
+#### if ((res < Unsat) and (not res == None)):
+#### break
+#### else:
+#### continue # continue j loop
+#### #end of for j loop
+#### if k < len(rng):
+#### t_init = t/2 #next time start with this time.
+#### else:
+#### j_last = j_last+1 #this was last j and we did not find cex, so start at next group
+#### print outcome + ' => ' ,
+#### if ((res < Unsat) and (not res == None)):
+#### t_init = t/2
+#### abc('read_status %d.status'%Ji) #make sure we got the right status file.
+#### #actually if doing abstraction we could remove proved outputs now, but we do not. -**inefficiency**
+#### return res
+#### else: #This implies that no outputs were disproved. Thus can remove proved outputs.
+#### abc('r %s__ysavetemp.aig'%f_name) #restore original aig
+#### if not result == []:
+#### res = []
+#### for j in range(len(result)):
+#### k = result[j]
+#### if k[1] == 2:
+#### res = res + [k[0]]
+###### print res
+###### result = mapp(res,G)
+#### result = res
+###### print result
+#### remove(result) #remove the outputs that were proved UNSAT.
+#### #This is OK for both abstract and speculate
+#### print 'Number of POs reduced to %d'%n_pos()
+#### if n_pos() == 0:
+#### return Unsat
+#### if t>=T:
+#### return Undecided
+#### else:
+#### continue
+#### return Undecided
+
+####def remap_pos():
+#### """ maintains a map of current outputs to original outputs"""
+#### global po_map
+#### k = j = 0
+#### new = []
+#### assert n_pos() == len(po_map), 'length of po_map, %d, and current # POs, %d, don"t agree'%(len(po_map),n_pos())
+#### for j in range(len(po_map)):
+#### N = n_pos()
+#### abc('removepo -N %d'%k) # this removes the output if it is 0 driven
+#### if n_pos() == N:
+#### new = new + [po_map[j]]
+#### k = k+1
+#### if len(new) < len(po_map):
+###### print 'New map = ',
+###### print new
+#### po_map = new
+
+####def prove_mapped():
+#### """
+#### assumes that srm is in workspace and takes the unsolved outputs and proves
+#### them by using proved outputs as constraints.
+#### """
+#### global po_map
+###### print po_map
+#### po_map.sort() #make sure mapped outputs are in order
+#### for j in po_map: #put unsolved outputs first
+#### run_command('swappos -N %d'%j)
+#### print j
+#### N = n_pos()
+#### assert N > len(po_map), 'n_pos = %d, len(po_map) = %d'%(N, len(po_map))
+#### run_command('constr -N %d'%(N-len(po_map))) #make the other outputs constraints
+#### run_command('fold') #fold constraints into remaining outputs.
+#### ps()
+#### prove_all_mtds(100)
-def mapp(R,G):
- result = []
- for j in range(len(R)):
- result = result + G[R[j]]
- return result
+####def mapp(R,G):
+#### result = []
+#### for j in range(len(R)):
+#### result = result + G[R[j]]
+#### return result
#_______________________________________
-def prove_g_pos_split():
- """like prove_g_pos but quits when any output is undecided"""
- global f_name, max_bmc,x_factor,x
- x = time.clock()
- #input_x_factor()
- init_f_name = f_name
- print 'Beginning prove_g_pos_split'
- prove_all_ind()
- print 'Number of outputs reduced to %d by fast induction with constraints'%n_pos()
- reparam()
-## try_rpm()
- print '********** Proving each output separately ************'
- f_name = init_f_name
- abc('w %s_osavetemp.aig'%f_name)
- n = n_pos()
- print 'Number of outputs = %d'%n
- pos_proved = []
- J = 0
- jnext = n-1
- while jnext >= 0:
- max_bmc = -1
- f_name = init_f_name
- abc('r %s_osavetemp.aig'%f_name)
- jnext_old = jnext
- extract(jnext,jnext)
- jnext = jnext -1
- print '\nProving output %d'%(jnext_old)
- f_name = f_name + '_%d'%jnext_old
- result = prove_1()
- if result == 'UNSAT':
- if jnext_old > jnext+1:
- print '******** PROVED OUTPUTS [%d-%d] ******** '%(jnext+1,jnext_old)
- else:
- print '******** PROVED OUTPUT %d ******** '%(jnext_old)
- pos_proved = pos_proved + range(jnext +1,jnext_old+1)
- continue
- if result == 'SAT':
- print 'One of output in (%d to %d) is SAT'%(jnext + 1,jnext_old)
- return result
- else:
- print '******** UNDECIDED on OUTPUTS %d thru %d ******** '%(jnext+1,jnext_old)
- print 'Eliminating %d proved outputs'%(len(pos_proved))
- # remove outputs proved and return
- f_name = init_f_name
- abc('r %s_osavetemp.aig'%f_name)
- remove(pos_proved)
- trim()
- write_file('group')
- return 'UNDECIDED'
- f_name = init_f_name
- abc('r %s_osavetemp.aig'%f_name)
- if not len(pos_proved) == n:
- print 'Eliminating %d proved outputs'%(len(pos_proved))
- remove(pos_proved)
- trim()
- write_file('group')
- result = 'UNDECIDED'
- else:
- print 'Proved all outputs. The problem is proved UNSAT'
- result = 'UNSAT'
- print 'Total time = %f sec.'%(time.clock() - x)
- return result
-
-def group(a,n):
- """Groups together outputs beginning at output n and any contiguous preceeding output
- that does not increase the latch support by a or more"""
- global f_name, max_bmc
- nlt = n_latches()
- extract(n,n)
- nli = n_latches()
- if n == 0:
- return n-1
- for J in range(1,n+1):
- abc('r %s_osavetemp.aig'%f_name)
- j = n-J
- #print 'Running %d to %d'%(j,n)
- extract(j,n)
- #print 'n_latches = %d'%n_latches()
- #if n_latches() >= nli + (nlt - nli)/2:
- if n_latches() == nli:
- continue
- if n_latches() > nli+a:
- break
- abc('r %s_osavetemp.aig'%f_name)
-## if j == 1:
-## j = j-1
- print 'extracting [%d-%d]'%(j,n)
- extract(j,n)
- ps()
- return j-1
+####def prove_g_pos_split():
+#### """like prove_g_pos but quits when any output is undecided"""
+#### global f_name, max_bmc,x_factor,x
+#### x = time.clock()
+#### #input_x_factor()
+#### init_f_name = f_name
+#### print 'Beginning prove_g_pos_split'
+#### prove_all_ind()
+#### print 'Number of outputs reduced to %d by fast induction with constraints'%n_pos()
+#### reparam()
+###### try_rpm()
+#### print '********** Proving each output separately ************'
+#### f_name = init_f_name
+#### abc('w %s_osavetemp.aig'%f_name)
+#### n = n_pos()
+#### print 'Number of outputs = %d'%n
+#### pos_proved = []
+#### J = 0
+#### jnext = n-1
+#### while jnext >= 0:
+#### max_bmc = -1
+#### f_name = init_f_name
+#### abc('r %s_osavetemp.aig'%f_name)
+#### jnext_old = jnext
+#### extract(jnext,jnext)
+#### jnext = jnext -1
+#### print '\nProving output %d'%(jnext_old)
+#### f_name = f_name + '_%d'%jnext_old
+#### result = prove_1()
+#### if result == 'UNSAT':
+#### if jnext_old > jnext+1:
+#### print '******** PROVED OUTPUTS [%d-%d] ******** '%(jnext+1,jnext_old)
+#### else:
+#### print '******** PROVED OUTPUT %d ******** '%(jnext_old)
+#### pos_proved = pos_proved + range(jnext +1,jnext_old+1)
+#### continue
+#### if result == 'SAT':
+#### print 'One of output in (%d to %d) is SAT'%(jnext + 1,jnext_old)
+#### return result
+#### else:
+#### print '******** UNDECIDED on OUTPUTS %d thru %d ******** '%(jnext+1,jnext_old)
+#### print 'Eliminating %d proved outputs'%(len(pos_proved))
+#### # remove outputs proved and return
+#### f_name = init_f_name
+#### abc('r %s_osavetemp.aig'%f_name)
+#### remove(pos_proved)
+#### trim()
+#### write_file('group')
+#### return 'UNDECIDED'
+#### f_name = init_f_name
+#### abc('r %s_osavetemp.aig'%f_name)
+#### if not len(pos_proved) == n:
+#### print 'Eliminating %d proved outputs'%(len(pos_proved))
+#### remove(pos_proved)
+#### trim()
+#### write_file('group')
+#### result = 'UNDECIDED'
+#### else:
+#### print 'Proved all outputs. The problem is proved UNSAT'
+#### result = 'UNSAT'
+#### print 'Total time = %f sec.'%(time.clock() - x)
+#### return result
+
+####def group(a,n):
+####    """Groups together outputs beginning at output n and any contiguous preceding output
+#### that does not increase the latch support by a or more"""
+#### global f_name, max_bmc
+#### nlt = n_latches()
+#### extract(n,n)
+#### nli = n_latches()
+#### if n == 0:
+#### return n-1
+#### for J in range(1,n+1):
+#### abc('r %s_osavetemp.aig'%f_name)
+#### j = n-J
+#### #print 'Running %d to %d'%(j,n)
+#### extract(j,n)
+#### #print 'n_latches = %d'%n_latches()
+#### #if n_latches() >= nli + (nlt - nli)/2:
+#### if n_latches() == nli:
+#### continue
+#### if n_latches() > nli+a:
+#### break
+#### abc('r %s_osavetemp.aig'%f_name)
+###### if j == 1:
+###### j = j-1
+#### print 'extracting [%d-%d]'%(j,n)
+#### extract(j,n)
+#### ps()
+#### return j-1
def extract(n1,n2):
"""Extracts outputs n1 through n2"""
@@ -3041,183 +2921,450 @@ def extract(n1,n2):
if n2 > no:
return 'range exceeds number of POs'
abc('cone -s -O %d -R %d'%(n1, 1+n2-n1))
- #abc('scl') # eliminated because need to keep same number of inputs.
def remove_intrps(J):
+ global n_proc,ifbip
+ npr = n_proc
+ if 18 in J:
+ npr = npr+1
+ if len(J) <= npr:
+ return J
JJ = []
+ alli = [23,1,22] # if_no_bip, then this might need to be changed
+ l = len(J)-npr
+ alli = alli[:l]
for i in J:
- if i in allintrps:
+ if i in alli:
continue
else:
JJ = JJ +[i]
return JJ
-
-
def remove(lst):
"""Removes outputs in list"""
global po_map
n_before = n_pos()
zero(lst)
-## remap_pos()
- remove_0_pos()
- print 'list',
- lst
+ l=remove_const_pos()
print 'n_before = %d, n_list = %d, n_after = %d'%(n_before, len(lst), n_pos())
-
+
+def check_pos():
+ """ checks if any POs are 0, and removes them with a warning"""
+ N = n_pos()
+ l=remove_const_pos()
+ if not N == n_pos():
+ print 'WARNING: some POs are 0 and are rremoved'
+ print '%d POs removed'%(N - n_pos())
def zero(list):
"""Zeros out POs in list"""
for j in list:
run_command('zeropo -N %d'%j)
-def remove_0_pos():
+def mark_const_pos(ll=[]):
+    """ creates an indicator of which POs are const-0 and which are const-1
+ does not change number of POs
+ """
+ L = range(n_pos())
+ L.reverse()
+ if ll == []:
+ ll = [-1]*n_pos()
+ ind = ll
+ abc('&get') #save original
+ for j in L:
+ n_before = n_pos()
+ abc('removepo -N %d'%j) #removes const-0 output
+ if n_pos() < n_before:
+ ind[j]=0
+## print sumsize(ind)
+## ps()
+ abc('&put')
+ for j in L:
+ n_before = n_pos()
+ abc('removepo -z -N %d'%j) # -z removes const-1 output
+ if n_pos() < n_before:
+ ind[j]=1
+## ps()
+ abc('&put') #put back original
+## remove_const_pos()
+ print sumsize(ind)
+ return ind
+
+def remove_const_pos():
global po_map
"""removes the 0 pos, but no pis because we might get cexs and need the correct number of pis
Should keep tract of if original POs are 0 and are removed.
Can this happen outside of prove_all_ind or
pord_all which can set proved outputs to 0???
+ WARNING: This can remove constant 1 nodes too???
"""
run_command('&get; &trim -i; &put; addpi') #adds a pi only if there are none
po_map = range(n_pos())
-## # gone back to original method of removing pos. Thus po_map is irrelevant
-## remap_pos()
-## abc('addpi')
def psp():
quick_simp()
- result = run_parallel([6,21],500,'US') #runs 'run_parallel' and sp() in parallel. run_parallel uses
- #JP and TERM to terminate.
+ result = run_parallel([6,21],500,'US') #runs 'run_parallel' and sp() in parallel.
+## run_parallel uses JP and TERM to terminate.
return result
def sp():
+ global initial_f_name
"""Alias for super_prove"""
print 'Executing super_prove'
result = super_prove(0)
+ print '%s is done'%initial_f_name
return result
-##def super_sec(options):
-## """Main proof technique now for seq eq checking. Does original prove and if after speculation there are multiple output left
-## if will try to prove each output separately, in reverse order. It will quit at the first output that fails
-## to be proved, or any output that is proved SAT"""
-## global max_bmc, init_initial_f_name, initial_f_name,win_list, last_verify_time, sec_options
-## sec_options = options
-## init_initial_f_name = initial_f_name
-## if x_factor > 1:
-## print 'x_factor = %f'%x_factor
-## input_x_factor()
-## max_bmc = -1
-## x = time.time()
-## k = 2
-## K,result = prove_sec()
-## if ((result == 'SAT') or (result == 'UNSAT')):
-## print '%s: total clock time taken by super_prove = %f sec.'%(result,(time.time() - x))
-## return
-## elif ((result[:3] == 'UND') and (n_latches() == 0)):
-## return result
-## print '%s: total clock time taken by super_prove = %f sec.'%(result,(time.time() - x))
-## if n_pos() > 1:
-## print 'Entering prove_g_pos'
-## result = prove_g_pos(0)
-## print result
-## if result == 'UNSAT':
-## print 'Total clock time taken by super_prove = %f sec.'%(time.time() - x)
-## return result
-## if result == 'SAT':
-## k = 1 #Don't try to prove UNSAT on an abstraction that had SAT
-## # should go back to backup 1 since probably spec was bad.
-## if check_abs(): #if same as abstract version, check original simplified version
-## k = 0
-## y = time.time()
-## if K == 0: #K = 0 means that speculate found cex in abstraction.
-## k=0
-## print 'Entering BMC_VER_result(%d)'%k
-## result = BMC_VER_result(k)
-## #print 'win_list = ',win_list
-## print 'Total clock time taken by last gasp verification = %f sec.'%(time.time() - y)
-## print 'Total clock time for %s = %f sec.'%(init_initial_f_name,(time.time() - x))
-## return result
+def sumsize(L):
+ d = count_less(L,0)
+ u = count_less(L,1)-d
+ s = count_less(L,2) - (d+u)
+ return 'SAT = %d, UNSAT = %d, UNDECIDED = %d'%(s,u,d)
+
+def unmap(L,L2,map):
+ mx = max(list(map))
+ assert mx <= len(L2),'max of map = %d, length of L2 = %d'%(mx,len(L))
+ for j in range(len(map)):
+ L[j] = L2[map[j]] #expand results of L2 into L
+ return L
+
+def create_map(L,N):
+ map = [-1]*N
+ for j in range(len(L)):
+ lj = L[j]
+ for k in range(len(lj)):
+ map[lj[k]] = j
+ return map
+
+def mp(op='sp'):
+ L = multi_prove(op,900)
+ return sumsize(L)
+
+def multi_prove(op='sp',tt=900):
+ """two phase prove process for multiple output functions"""
+ global max_bmc, init_initial_f_name, initial_f_name,win_list, last_verify_time
+ global f_name_save,name_save
+ x_init = time.time()
+ N = n_pos()
+ L = [-1]*N
+ print 'Mapping for isomorphism: '
+ iso() #reduces number of POs
+ map = create_map(iso_eq_classes(),N) #creates map into original
+ N = n_pos()
+ r = pre_simp() #pre_simp
+ write_file('smp')
+ NP = n_pos()/N #if NP > 1 then NP unrollings were done.
+ if n_pos() > N:
+ assert NP>=2, 'NP not 2, n_pos = %d, N = %d, NP = %d'%(n_pos(),N,NP)
+ print 'pre_simp done. NP = %d\n\n'%NP
+ #WARNING: if phase abstraction done, then number of POs changed.
+ if r == Unsat:
+ L = [0]*N #all outputs are UNSAT
+ print sumsize(L)
+ return L
+ f_name_save = f_name
+ name_save = '%s_initial_save.aig'%f_name
+ abc('w %s'%name_save)
+ L1 = [-1]*n_pos() # L1 has length after pre_simp
+## L1= mark_const_pos(L1) #puts const values in L1
+## print sumsize(L1)
+ #########do second iso here
+ N = n_pos()
+ print 'Mapping for isomorphism: '
+ iso() #second iso - changes number of POs
+ map2 = create_map(iso_eq_classes(),N) #creates map into previous
+ L2 = [-1]*n_pos()
+ L2 = mark_const_pos(L2) #populates L2 with constants
+ print sumsize(L2)
+ #########second iso done
+ abc('r %s'%name_save)
+ L2 = mprove(L2,op,10) #populates L2 with results
+ print sumsize(L2)
+ time_left = tt - (time.time()-x_init)
+ print '\n\n\n*********time left = %d ********\n\n\n'%time_left
+ N = count_less(L2,0)
+ if N > 0:
+ t = max(100,time_left/N)
+ L2 = mprove(L2,op,t) #populates L2 with more results
+ S = sumsize(L2)
+ T = '%.2f'%(time.time() - x_init)
+ print '%s in time = %s'%(S,T)
+ ########undo second iso
+ L1 = unmap(L1,L2,map2)
+ print 'unmapping for iso: ',
+ print sumsize(L1)
+ #############
+ if NP > 1: #an unrolling was done
+ L1 = check_L(NP,L1) #map into reduced size before unrolling was done.
+ print 'unmapping for unrolling.',
+ print sumsize(L1)
+ L = unmap(L,L1,map)
+ print 'unmapping for iso',
+ print sumsize(L)
+ return L
+
+def check_L(NP,L):
+ """This happens when an unrolling creates additional POs
+ We want to check that L[j] = L[j+N] etc to make sure the PO results agree
+ in all phases, i.e. sat, unsat, or undecided
+ """
+ N = len(L)/NP #original number of POs
+ for j in range(N):
+ for k in range(NP)[1:]: #k = 1,2,...,NP-1
+ if (L[j] == 1):
+ break
+ elif L[j+k*N] == 1:
+ L[j] = 1
+ break
+ elif L[j] == -1:
+ continue #we have to continue to look for a 1
+ elif L[j] == 0:
+ if L[j+k*N] == -1:
+ L[j] = -1
+ break
+ continue #have to make sure that all phases are 0
+ return L[:N]
+
+
+def mprove(L,op='sp',tt=1000):
+ """ 0 = unsat, 1 = sat, -1 = undecided"""
+ global max_bmc, init_initial_f_name, initial_f_name,win_list, last_verify_time
+ global f_name_save,name_save,temp_dec
+ N = len(L)
+ t = tt #controls the amount of time spent on each cone
+ funcs = [eval('(pyabc_split.defer(%s)())'%op)]
+ funcs = create_funcs(slps,t)+funcs
+ mtds =sublist(methods,slps) + [op]
+ res = L
+ for j in range(N):
+ if L[j] > -1:
+ continue #already solved
+ abc('r %s'%name_save) #restore original function
+ x = time.time()
+ name = '%s_cone_%d'%(f_name_save,j)
+ abc('cone -s -O %d'%j)
+ abc('w %s.aig'%name)
+ print '\n********************************************'
+ read_file_quiet(name)
+ print '________%s(%s)__________'%(op,name)
+ temp_dec = False
+ fork_last(funcs,mtds)
+ T = '%.2f'%(time.time() - x)
+ out = get_status()
+ if out == Unsat:
+ res[j] = 0
+ if out < Unsat:
+ res[j] = 1
+ print '\n%s: %s in time = %s'%(name,RESULT[out],T)
+ return res
+
+def sp1(options = ''):
+ global sec_options
+ sec_options = options
+ return super_prove(1)
def super_prove(n=0):
"""Main proof technique now. Does original prove and if after speculation there are multiple output left
if will try to prove each output separately, in reverse order. It will quit at the first output that fails
to be proved, or any output that is proved SAT
n controls call to prove(n)
- n = 2 means skip initial simplify and speculate,
- n=1 skip initial simp.
+ is n == 0 do smp and abs first, then spec
+ if n == 1 do smp and spec first then abs
+ if n == 2 just do quick simplification instead of full simplification, then abs first, spec second
"""
- global max_bmc, init_initial_f_name, initial_f_name,win_list, last_verify_time
+ global max_bmc, init_initial_f_name, initial_f_name,win_list, last_verify_time, f_name
init_initial_f_name = initial_f_name
if x_factor > 1:
print 'x_factor = %f'%x_factor
input_x_factor()
max_bmc = -1
x = time.time()
- k = 2
- if n == 2:
- K,result = prove(2)
- else:
- K,result = prove(0)
+## if n == 2:
+## result = prove(2)
+## else:
+## result = prove(0)
+ result = prove(n)
if ((result == 'SAT') or (result == 'UNSAT')):
print '%s: total clock time taken by super_prove = %f sec.'%(result,(time.time() - x))
return result
- elif ((result[:3] == 'UND') and (n_latches() == 0)):
+ elif ((result == 'UNDECIDED') and (n_latches() == 0)):
return result
- print '%s: total clock time taken by super_prove = %f sec.'%(result,(time.time() - x))
+ print '%s: total clock time taken by super_prove so far = %f sec.'%(result,(time.time() - x))
y = time.time()
- if K == 0: #K = 0 means that speculate found cex in abstraction.
- k=0
if n == 2:
print 'Entering BMC_VER()'
- result = BMC_VER() #typically called from a super_prove run in parallel.
- if result == 'SAT': #this is because we have done an abstraction and cex is invalid.
+ result = BMC_VER() #n=2 is only called from sp2, a super_prove run in parallel.
+ if ((result == 'SAT') and (('_abs' in f_name)or '_spec' in f_name)): #this is because we have done an abstraction and cex is invalid.
result = 'UNDECIDED'
else:
- print 'Entering BMC_VER_result(%d)'%k
- result = BMC_VER_result(k)
- #print 'win_list = ',win_list
- print result
+ print 'Entering BMC_VER_result'
+ result = BMC_VER_result() #this does backing up if cex is found
print 'Total clock time taken by last gasp verification = %f sec.'%(time.time() - y)
print 'Total clock time for %s = %f sec.'%(init_initial_f_name,(time.time() - x))
return result
def reachm(t):
x = time.clock()
- #print 'trying reachm'
abc('&get;&reachm -vcs -T %d'%t)
print 'reachm done in time = %f'%(time.clock() - x)
return get_status()
def reachp(t):
x = time.clock()
- #print 'trying reachm2'
abc('&get;&reachp -rv -T %d'%t)
print 'reachm2 done in time = %f'%(time.clock() - x)
return get_status()
+def scorr():
+ run_command('scorr')
+ ps()
+
+def select_undecided(L):
+ res = []
+ for j in range(len(L)):
+ l = L[j]
+ if l[1] == 'UNDECIDED':
+ res = res + [l[0]]
+ return res
+
+####def execute(L,t):
+#### """
+#### run the files in the list L using ss, sp, ssm each for max time = t
+#### """
+#### funcs1 = [eval('(pyabc_split.defer(ss)())')]
+#### funcs1 = create_funcs(slps,t)+funcs1
+#### mtds1 =sublist(methods,slps) + ['ss']
+#### funcs2 = [eval('(pyabc_split.defer(sp)())')]
+#### funcs2 = create_funcs(slps,t)+funcs2
+#### mtds2 =sublist(methods,slps) + ['sp']
+#### funcs3 = [eval('(pyabc_split.defer(ssm)())')]
+#### funcs3 = create_funcs(slps,t)+funcs3
+#### mtds3 =sublist(methods,slps) + ['ssm']
+#### for j in range(len(L)):
+#### name = L[j]
+#### print '\n\n\n\n________ss__________'
+#### read_file_quiet(name)
+#### print '****ss****'
+#### fork_last(funcs1,mtds1)
+#### print '***Done with ss on %s\n'%name
+#### print '\n\n******ssm************'
+#### read_file_quiet(name)
+#### print '****ssm****'
+#### fork_last(funcs3,mtds3)
+#### print '***Done with ssm on %s \n'%name
+
+def execute_op(op,L,t):
+ """
+ run the files in the list L using operation "op", each for max time = t
+ """
+ funcs = [eval('(pyabc_split.defer(%s)())'%op)]
+ funcs = create_funcs(slps,t)+funcs
+ mtds =sublist(methods,slps) + [op]
+ res = []
+ for j in range(len(L)):
+ x = time.time()
+ name = L[j]
+ print '\n\n\n\n________%s__________'%op
+ read_file_quiet(name)
+ m,result = fork_last(funcs,mtds)
+ if result == Undecided:
+ result = RESULT[result]
+ T = '%.2f'%(time.time() - x)
+ new_res = [name,result,T]
+ res = res + [new_res]
+ print '\n%s'%new_res
+ return res
+
+def x_ops(ops,L,t):
+ """ execute each op in the set of ops on each file in the set of files of L, each for time t"""
+ result = []
+ for j in range(len(ops)):
+ op = ops[j]
+ result.append('Result of %s'%op)
+ result.append(execute_op(op,L,t))
+ return result
+
+def iso(n=0):
+ if n == 0:
+ run_command('&get;&iso;&put')
+ else:
+ run_command('&get;&iso;iso;&put')
+
+def check_iso(N):
+ ans = get_large_po()
+ if ans == -1:
+ return 'no output found'
+ n_iso = count_iso(N)
+ return n_iso
+
+def count_iso(N):
+ abc('&get;write_aiger -u file1.aig') #put this cone in & space and write file1
+## print 'PO %d is used'%i
+ n_iso = 0 #start count
+ for i in range(N):
+ abc('permute;write_aiger -u file2.aig')
+ n = filecmp.cmp('file1.aig','file2.aig')
+ print n,
+ n_iso = n_iso+n
+ print 'the number of isomorphisms was %d out of %d'%(n_iso,N)
+ return n_iso
+
+def get_large_po():
+## remove_const_pos() #get rid of constant POs
+ NL = n_latches()
+ NO = n_pos()
+ abc('&get') #put the in & space
+ n_latches_max = 0
+ nl = imax = -1
+ for i in range(NO): #look for a big enough PO
+ abc('&put;cone -s -O %d;scl'%i)
+ nl = n_latches()
+ if nl >.15*NL:
+ imax = i
+## print 'cone %d has %d FF'%(i,nl)
+ break
+ if nl> n_latches_max:
+ n_latches_max = nl
+ imax = i
+ print i,nl
+ if i == NO-1:
+ print 'no PO is big enough'
+ return -1
+ print 'PO_cone = %d, n_latches = %d'%(imax,nl)
+
+def scorro():
+ run_command('scorr -o')
+ l = remove_const_pos()
+ ps()
+
+def drw():
+ run_command('drw')
+ ps()
+
+def dc2rs():
+ abc('dc2rs')
+ ps()
+
+
def reachn(t):
x = time.clock()
- #print 'trying reachm3'
abc('&get;&reachn -rv -T %d'%t)
print 'reachm3 done in time = %f'%(time.clock() - x)
return get_status()
def reachx(t):
x = time.time()
- #print 'trying reachx'
abc('reachx -t %d'%t)
print 'reachx done in time = %f'%(time.time() - x)
return get_status()
def reachy(t):
x = time.clock()
- #print 'trying reachy'
abc('&get;&reachy -v -T %d'%t)
print 'reachy done in time = %f'%(time.clock() - x)
return get_status()
-
def create_funcs(J,t):
"""evaluates strings indexed by J in methods given by FUNCS
Returns a list of lambda functions for the strings in FUNCs
@@ -3241,67 +3388,68 @@ def check_abs():
abc('r %s_save.aig'%init_initial_f_name)
return False
-"""make a special version of BMC_VER_result that just works on the current network"""
-def BMC_VER():
- global init_initial_f_name, methods, last_verify_time
- #print init_initial_f_name
- xt = time.time()
- result = 5
- t = max(2*last_verify_time,100)
- print 'Verify time set to %d'%t
+def modify_methods(J,dec=0):
+ """ adjusts the engines to reflect number of processors"""
N = bmc_depth()
L = n_latches()
I = n_real_inputs()
- X = pyabc_split.defer(abc)
- J = slps + pdrs + [23] + bmcs
+ npr = n_proc -dec
+ if 18 in J: #if sleep in J add 1 more processor
+ npr = npr+1
if ( ((I+L<350)&(N>100)) or (I+L<260) or (L<80) ):
- J = J+reachs # add all reach methods
- if L < 80:
- J = J + [4]
+ if not 24 in J: #24 is reachs
+ J = J+[24] # add all reach methods
+ if len(J)>npr:
+ J = remove_intrps(J) #removes only if len(J)<n_processes
+ if len(J)< npr: #if not using all processors, add in pdrs
+ #modify allpdrs to reflect ifbip (see set_engines)
+ for j in range(len(allpdrs)):
+ if allpdrs[j] in J: #leave it in
+ continue
+ else: #add it in
+ J = J + [allpdrs[j]]
+ if len(J) == npr:
+ break
+## if L < 80:
+## if ((not 4 in J) and len(J) < n_proc):
+## J = J + [4]
+ return J
+
+def BMC_VER():
+ """ a special version of BMC_VER_result that just works on the current network
+ Just runs engines in parallel - no backing up
+ """
+ global init_initial_f_name, methods, last_verify_time, n_proc
+ xt = time.time()
+ result = 5
+ t = max(2*last_verify_time,10000) ####
+ print 'Verify time set to %d'%t
+ J = slps + pdrs + bmcs + intrps
+ J = modify_methods(J)
F = create_funcs(J,t)
mtds = sublist(methods,J)
- print '%s'%mtds
+ print mtds
(m,result) = fork_break(F,mtds,'US')
- print '(m,result) = %d,%s'%(m,result)
result = RESULT[result]
- print 'BMC_VER() result = %s'%result
+ print 'BMC_VER result = %s'%result
return result
-def BMC_VER_result(n):
- global init_initial_f_name, methods, last_verify_time
- #print init_initial_f_name
+def BMC_VER_result():
+ global init_initial_f_name, methods, last_verify_time,f_name
xt = time.time()
result = 5
- if n == 0:
- abc('r %s_smp.aig'%init_initial_f_name)
- print '\n***Running proof on initial simplified circuit\n',
- ps()
- elif n == 1:
- abc('r %s_smp_abs.aig'%init_initial_f_name)
- print '\n***Running proof on abstracted circuit',
- ps()
- else: # n was 2
- print '\n***Running proof on final unproved circuit',
- ps()
- t = max(2*last_verify_time,1000)
+ abc('r %s.aig'%f_name)
+ print '\n***Running proof on %s :'%f_name,
+ ps()
+    t = max(2*last_verify_time,10000) #each time a new time-out is set to at least 10000 sec.
print 'Verify time set to %d'%t
- #status = verify(J,t)
- N = bmc_depth()
- L = n_latches()
- I = n_real_inputs()
X = pyabc_split.defer(abc)
- J = slps + pdrs + [23] +bmcs
-## [0,1,7,14] # try pdr, interpolation, and pdrm
-## if n == 0:
-## J = J+ bmcs #add BMC #eveen if n =1 or 2 we want to find a cex
- #heuristic that if bmc went deep, then reachability might also
- if ( ((I+L<350)&(N>100)) or (I+L<260) or (L<80) ):
- #J = J+[4,13] #add reachx and reachn
- J = J+reachs # add all reach methods
- if L < 80:
- J = J + [4]
- #F = eval(S)
+ J = slps + pdrs + bmcs + intrps
+ last_name = seq_name(f_name).pop()
+ if 'smp' == last_name: # then we try harder to prove it.
+ J = slps + intrps + allpdrs + [2]
+ J = modify_methods(J) #if # processors is enough and problem is small enough then add in reachs
F = create_funcs(J,t)
mtds = sublist(methods,J)
print '%s'%mtds
@@ -3309,21 +3457,16 @@ def BMC_VER_result(n):
result = get_status()
if result == Unsat:
return 'UNSAT'
- if n == 0:
+ if last_name == 'smp' : # can't backup so just return result
if result < Unsat:
return 'SAT'
if result > Unsat: #still undefined
return 'UNDECIDED'
- elif n == 1: #just tried abstract version - try initial simplified version
+ else: # (last_name == 'spec' or last_name == 'abs') - the last thing we did was an "abstraction"
if result < Unsat:
- return BMC_VER_result(0)
- else: #if undecided on good abstracted version, does not make sense to try initial one
- return 'UNDECIDED'
- else: # n was 2, just tried final unresolved version - now try abstract version
- if result < Unsat:
- return BMC_VER_result(1) #try abstract version
- else: #undecided on final unproved circuit. Probably can't do better.
- return 'UNDECIDED'
+ f_name = revert(f_name,1) # revert the f_name back to previous
+ abc('r %s.aig'%f_name)
+ return BMC_VER_result() #recursion here.
def try_split():
abc('w %s_savetemp.aig'%f_name)
@@ -3344,13 +3487,14 @@ def prove_all_ind():
"""Tries to prove output k by induction, using other outputs as constraints.
If ever an output is proved
it is set to 0 so it can't be used in proving another output to break circularity.
- Finally all zero'ed ooutputs are removed.
+ Finally all zero'ed outputs are removed.
Prints out unproved outputs Finally removes 0 outputs
"""
global n_pos_proved, n_pos_before
+ print 'n_pos_proved = %d'%n_pos_proved
n_proved = 0
N = n_pos()
-## remove_0_pos()
+## l=remove_const_pos()
## print '0 valued output removal changed POs from %d to %d'%(N,n_pos())
if n_pos() == 1:
return
@@ -3361,16 +3505,17 @@ def prove_all_ind():
for j in lst:
## abc('zeropo -N 0')
abc('swappos -N %d'%j)
-## remove_0_pos() #may not have to do this if constr works well with 0'ed outputs
+## l=remove_const_pos() #may not have to do this if constr works well with 0'ed outputs
abc('constr -N %d'%(n_pos()-1))
abc('fold')
n = max(1,n_ands())
f = max(1,min(40000/n,16))
f = int(f)
- abc('ind -ux -C 10000 -F %d'%f)
+## abc('ind -C 10000 -F %d'%f)
+ abc('ind -C 1000 -F %d'%f)
## run_command('print_status')
status = get_status()
- abc('r %s_osavetemp.aig'%f_name)
+ abc('r %s_osavetemp.aig'%f_name) #have to restore original here
if status == Unsat:
## print '+',
abc('zeropo -N %d'%j)
@@ -3381,11 +3526,63 @@ def prove_all_ind():
print '-%d'%j,
else:
print '*%d'%j,
- remove_0_pos()
+ l=remove_const_pos()
n_pos_proved = n_pos_proved + n_proved
print '\nThe number of POs reduced from %d to %d'%(N,n_pos())
+ print 'n_pos_proved = %d'%n_pos_proved
#return status
+def remove_iso(L):
+ global n_pos_proved, n_pos_before
+ lst = []
+ for j in range(len(L)):
+ ll = L[j][1:]
+ if len(ll) == 0:
+ continue
+ else:
+ lst = lst + ll
+ zero(lst)
+ n_pos_proved = n_pos_proved + count_less(lst,n_pos_before - n_pos_proved)
+ print 'The number of POs removed by iso was %d'%len(lst)
+ l=remove_const_pos() #can an original PO be zero?
+
+def prove_all_iso():
+ """Tries to prove output k by isomorphism. Gets number of iso-eq_classes as an array of lists.
+ Updates n_pos_proved
+ """
+ global n_pos_proved, n_pos_before
+ n_proved = 0
+ N = n_pos()
+ if n_pos() == 1:
+ return
+ print 'n_pos_proved = %d'%n_pos_proved
+## run_command('&get;&iso;&put')
+ abc('&get;&iso')
+ L = iso_eq_classes()
+## print L
+ remove_iso(L)
+## lim = n_pos_before - n_pos_proved
+## for j in range(len(L)):
+## ll = L[j]
+## if len(ll) == 1:
+## continue
+## if not ll[0] < lim:
+## continue
+## else:
+## n = count_less(ll[1:], lim) #drop the first since it is the representative.
+## print n
+## n_proved = n_proved + n
+## print n, n_proved
+## n_pos_proved = n_pos_proved + n_proved
+ print '\nThe number of POs reduced by iso was from %d to %d'%(N,n_pos())
+
+def count_less(L,n):
+ count = 0
+ for j in range(len(L)):
+ if L[j] < n:
+ count = count + 1
+ return count
+
def prove_all_mtds(t):
"""
Tries to prove output k with multiple methods in parallel,
@@ -3394,13 +3591,13 @@ def prove_all_mtds(t):
Finally all zero'ed ooutputs are removed.
"""
N = n_pos()
-## remove_0_pos()
+## l=remove_const_pos()
## print '0 valued output removal changed POs from %d to %d'%(N,n_pos())
abc('w %s_osavetemp.aig'%f_name)
list = range(n_pos())
for j in list:
run_command('swappos -N %d'%j)
-## remove_0_pos() #may not have to do this if constr works well with 0'ed outputs
+## l=remove_const_pos() #may not have to do this if constr works well with 0'ed outputs
abc('constr -N %d'%(n_pos()-1))
abc('fold')
## cmd = '&get;,pdr -vt=%d'%t #put in parallel.
@@ -3414,7 +3611,7 @@ def prove_all_mtds(t):
abc('w %s_osavetemp.aig'%f_name) #if changed, store it permanently
print '%d'%j,
assert not is_sat(), 'one of the POs is SAT' #we can do better than this
- remove_0_pos()
+ l=remove_const_pos()
print '\nThe number of POs reduced from %d to %d'%(N,n_pos())
#return status
@@ -3423,13 +3620,13 @@ def prove_all_pdr(t):
it is set to 0 so it can't be used in proving another output to break circularity.
Finally all zero'ed ooutputs are removed. """
N = n_pos()
-## remove_0_pos()
+## l=remove_const_pos()
print '0 valued output removal changed POs from %d to %d'%(N,n_pos())
abc('w %s_osavetemp.aig'%f_name)
list = range(n_pos())
for j in list:
abc('swappos -N %d'%j)
-## remove_0_pos() #may not have to do this if constr works well with 0'ed outputs
+## l=remove_const_pos() #may not have to do this if constr works well with 0'ed outputs
abc('constr -N %d'%(n_pos()-1))
abc('fold')
cmd = '&get;,pdr -vt=%d'%t #put in parallel.
@@ -3441,14 +3638,14 @@ def prove_all_pdr(t):
abc('zeropo -N %d'%j)
abc('w %s_osavetemp.aig'%f_name) #if changed, store it permanently
print '%d'%j,
- remove_0_pos()
+ l=remove_const_pos()
print '\nThe number of POs reduced from %d to %d'%(N,n_pos())
#return status
def prove_each_ind():
"""Tries to prove output k by induction, """
N = n_pos()
- remove_0_pos()
+ l=remove_const_pos()
print '0 valued output removal changed POs from %d to %d'%(N,n_pos())
abc('w %s_osavetemp.aig'%f_name)
list = range(n_pos())
@@ -3465,7 +3662,7 @@ def prove_each_ind():
abc('zeropo -N %d'%j)
abc('w %s_osavetemp.aig'%f_name) #if changed, store it permanently
print '%d'%j,
- remove_0_pos()
+ l=remove_const_pos()
print '\nThe number of POs reduced from %d to %d'%(N,n_pos())
#return status
@@ -3473,7 +3670,7 @@ def prove_each_pdr(t):
"""Tries to prove output k by PDR. If ever an output is proved
it is set to 0. Finally all zero'ed ooutputs are removed. """
N = n_pos()
- remove_0_pos()
+ l=remove_const_pos()
print '0 valued output removal changed POs from %d to %d'%(N,n_pos())
abc('w %s_osavetemp.aig'%f_name)
list = range(n_pos())
@@ -3488,7 +3685,7 @@ def prove_each_pdr(t):
abc('zeropo -N %d'%j)
abc('w %s_osavetemp.aig'%f_name) #if changed, store it permanently
print '%d'%j,
- remove_0_pos()
+ l=remove_const_pos()
print '\nThe number of POs reduced from %d to %d'%(N,n_pos())
#return status
@@ -3496,7 +3693,7 @@ def disprove_each_bmc(t):
"""Tries to prove output k by PDR. If ever an output is proved
it is set to 0. Finally all zero'ed ooutputs are removed. """
N = n_pos()
- remove_0_pos()
+ l=remove_const_pos()
print '0 valued output removal changed POs from %d to %d'%(N,n_pos())
abc('w %s_osavetemp.aig'%f_name)
list = range(n_pos())
@@ -3511,7 +3708,7 @@ def disprove_each_bmc(t):
abc('zeropo -N %d'%j)
abc('w %s_osavetemp.aig'%f_name) #if changed, store it permanently
print '%d'%j,
- remove_0_pos()
+ l=remove_const_pos()
print '\nThe number of POs reduced from %d to %d'%(N,n_pos())
#return status
@@ -3520,6 +3717,8 @@ def pord_1_2(t):
global n_pos_proved, ifpord1, pord_on
pord_on = True # make sure that we do not reparameterize after abstract in prove_2
n_pos_proved = 0
+ if n_pos()<4:
+ return Undecided
if ifpord1:
print 'Trying each output for %0.2f sec'%(.1*t)
result = pord_all(.1*t) #we want to make sure that there is no easy cex.
@@ -3527,6 +3726,7 @@ def pord_1_2(t):
return result
ifpord1 = 0
print 'Trying each output for %0.2f sec'%t
+ #might consider using iso before the second pass of pord_all
result = pord_all(t+2*G_T) #make sure there is enough time to abstract
pord_on = False #done with pord
return result
@@ -3537,12 +3737,6 @@ def pord_all(t):
print 'last_cx = %d'%last_cx
btime = time.time()
N = n_pos()
-## remove_0_pos()
- print '0 valued output removal changed POs from %d to %d'%(N,n_pos())
-## bmc_ss(t)
-## if is_sat():
-## cex_list = cex_get_vector()
-## return Sat
prove_all_ind() ############ change this to keep track of n_pos_proved
nn = n_pos()
abc('w %s_osavetemp.aig'%f_name)
@@ -3550,7 +3744,8 @@ def pord_all(t):
return Undecided
lst = range(n_pos())
proved = disproved = []
-## abc('&get') #using this space to save original file
+ abc('&get') #using this space to save original file.
+ ### Be careful that & space is not changed.
## with redirect.redirect( redirect.null_file, sys.stdout ):
## with redirect.redirect( redirect.null_file, sys.stderr ):
cx_list = []
@@ -3560,14 +3755,19 @@ def pord_all(t):
lst.reverse()
n_und = 0
for j in lst:
- print ''
- print j,
- abc('&put; cone -O %d -s'%j)
+ print '\ncone %s. '%j,
+ abc('&put; cone -s -O %d'%j) #puts the &space into reg-space and extracts cone j
+ #requires that &space is not changed. &put resets status. Use &put -s to keep status
abc('scl -m')
-## ps()
-## run_parallel(slps+sims+bmcs+pdrs+[6],t,'US')
+ ps()
+## print 'running sp2'
+ ###
result = run_sp2_par(t)
- print 'run_sp2_par result is %s'%result
+## J = slps+JV
+## result = verify(J,t)
+## result = RESULT[result]
+## ###
+## print 'run_sp2_par result is %s'%result
if result == 'UNDECIDED':
n_und = n_und + 1
status = Undecided
@@ -3587,7 +3787,7 @@ def pord_all(t):
proved = proved + [j]
if j < n_pos_before - n_pos_proved:
n_proved = n_proved +1
- n_pos_proved = n_pos_proved + n_proved
+## n_pos_proved = n_pos_proved + n_proved. #this should not be here because we should start fresh
print '\nProved %d outputs'%len(proved)
print 'Disproved %d outputs'%len(disproved)
print 'Time for pord_all was %0.2f'%(time.time() - btime)
@@ -3595,12 +3795,15 @@ def pord_all(t):
cex_list = cx_list
if len(disproved)>0:
assert status == Sat, 'status = %d'%status
+ n_pos_proved = 0 #we want to reset this because of a bad speculation
return Sat
else:
+ n_pos_proved = n_pos_proved + n_proved
abc('r %s_osavetemp.aig'%f_name)
## abc('&put') # returning original to work spece
remove(proved)
- print '\nThe number of unproved POs reduced from %d to %d'%(N,n_pos())
+ print '\nThe number of unproved POs reduced from %d to %d'%(N,n_pos()),
+ ps()
if n_pos() > 0:
return Undecided
else:
@@ -3647,9 +3850,9 @@ def remove_disproved_pos(lst):
for i in range(len(lst)):
if not lst[i] == None:
abc('zeropo -N %d'%i)
- remove_0_pos()
+ l=remove_const_pos()
-def bmc_s(t):
+def bmc_j(t):
""" finds a cex in t seconds starting at 2*N where N is depth of bmc -T 1"""
x = time.time()
tt = min(5,max(1,.05*t))
@@ -3659,6 +3862,7 @@ def bmc_s(t):
return get_status()
## abc('bmc3 -T 1')
N = n_bmc_frames()
+ N = max(1,N)
## print bmc_depth()
## abc('bmc3 -C 1000000 -T %f -S %d'%(t,int(1.5*max(3,max_bmc))))
cmd = 'bmc3 -J 2 -D 4000 -C 1000000 -T %f -S %d'%(t,2*N)
@@ -3668,17 +3872,30 @@ def bmc_s(t):
print 'cex found in %0.2f sec at frame %d'%((time.time()-x),cex_frame())
return get_status()
+def pdrseed(t):
+ """uses the abstracted version now"""
+## abc('&get;,treb -rlim=60 -coi=3 -te=1 -vt=%f -seed=521'%t)
+ abc('&get;,treb -rlim=100 -vt=%f -seed=521'%t)
-def pdr(t):
+
+def pdrold(t):
abc('&get; ,pdr -vt=%f'%t)
+
+def pdr(t):
+ abc('&get; ,treb -vt=%f'%t)
+ return RESULT[get_status()]
+
+def pdra(t):
+## abc('&get; ,treb -rlim=100 -ssize -pre-cubes=3 -vt=%f'%t)
+ abc('&get; ,treb -abs -rlim=100 -gen-cex -vt=%f'%t)
return RESULT[get_status()]
def pdrm(t):
- abc('pdr -v -C 0 -T %f'%t)
+ abc('pdr -C 0 -T %f'%t)
return RESULT[get_status()]
def pdrmm(t):
- abc('pdr -mv -C 0 -T %f'%t)
+ abc('pdr -C 0 -M 298 -T %f'%t)
return RESULT[get_status()]
def split(n):
@@ -3699,48 +3916,44 @@ def keep_splitting():
def drill(n):
run_command('&get; &reachm -vcs -H 5 -S %d -T 50 -C 40'%n)
-def prove_1():
+def prove_1(ratio=.75):
"""
- A version of prove(). Called only during prove_pos or prove_g_pos or prove_only when we
- have speculated and produced multiple outputs. Does speculation only in final_verify_recur.
+ A version of prove called from prove_pos, prove_g_pos, prove_only, prove_g_pos_split when we
+ have speculated and produced multiple outputs.
Proves all the outputs together. If ever an abstraction was done then
if SAT is returned,we make RESULT return "undecided".
"""
global x_factor,xfi,f_name,x, initial_f_name
x = time.time()
max_bmc = -1
- K = 0
print 'Initial: ',
ps()
x_factor = xfi
- print 'x_factor = %f'%x_factor
- write_file('smp')
initial_f_name_save = initial_f_name #needed because we are making local backups here.
initial_f_name = '%s_temp'%initial_f_name
- abc('w %s_backup_%d.aig'%(initial_f_name,K)) #backup 0
- K = K +1 #K = 1 is next backup
set_globals()
print'\n***Running abstract'
- nl_b = n_latches()
- status = abstract()
+ status = abstract(ifbip)
trim()
- write_file('abs')
status = process_status(status)
if ((status <= Unsat) or status == Error):
if status < Unsat:
print 'CEX in frame %d'%cex_frame(),
- print 'abstract found a cex in unabstacted circuit'
+ print 'abstract found a cex in initial circuit'
print 'Time for proof = %f sec.'%(time.time() - x)
initial_f_name = initial_f_name_save
return RESULT[status]
print 'Time for proof = %f sec.'%(time.time() - x)
initial_f_name = initial_f_name_save
return RESULT[status]
- abc('w %s_backup_%d.aig'%(initial_f_name,K)) #backup 1
- print 'Entering final_verify_recur(2) from prove_1()'
- status = final_verify_recur(2)
+ #undecided here
+    print 'Entering direct verification'
+#### status = final_verify_recur(2)
+ status = BMC_VER()
+ return status
+
trim()
- write_file('final')
+#### write_file('final')
print 'Time for proof = %f sec.'%(time.time() - x)
initial_f_name = initial_f_name_save
return RESULT[status]
@@ -3749,8 +3962,8 @@ def pre_reduce():
x = time.clock()
pre_simp()
write_file('smp')
- abstract()
- write_file('abs')
+ abstract(ifbip)
+#### write_file('abs')
print 'Time = %f'%(time.clock() - x)
def sublist(L,I):
@@ -3786,26 +3999,28 @@ def top_fork(J,t):
return get_status()
def run_sp2_par(t):
- """ Runs the single method super_prove(2), timed for t seconds."""
+ """ Runs the single method simple, timed for t seconds."""
global cex_list,methods
J = slps+[6]
+ print sublist(methods,J)
funcs = create_funcs(J,t)
y = time.time()
for i,res in pyabc_split.abc_split_all(funcs):
- #print 'i,res = %d,%s'%(i,res)
+## print 'i,res = %d,%s'%(i,res)
t = time.time()-y
if i == 0:
print 'sleep timer expired in %0.2f'%t
return 'UNDECIDED'
else:
+## print i,res
if res == 'UNSAT':
- print 'SUPER_PROVE2 proved UNSAT in %0.2f sec.'%t
+ print 'Simple_prove proved UNSAT in %0.2f sec.'%t
return 'UNSAT'
elif res == 'UNDECIDED':
- print 'SUPER_PROVE2 proved UNDECIDED in %0.2f sec.'%t
+ print 'Simple_prove proved UNDECIDED in %0.2f sec.'%t
return 'UNDECIDED'
else:
- print 'SUPER_PROVE2 found cex in %0.2f sec.'%t
+ print 'Simple_prove found cex in %0.2f sec.'%t
return 'SAT'
@@ -3930,6 +4145,23 @@ def fork_best(funcs,mts):
abc('r %s_best_aig.aig'%f_name)
return m_best,res
+def take_best(funcs,mts):
+    """ fork the functions. If not solved, take the best result in terms of AIG size"""
+ global f_name
+ n = len(mts)-1
+ y = time.time()
+ m_best = -1
+ best_size = 1000000
+ abc('w %s_best_aig.aig'%f_name)
+ for i,res in pyabc_split.abc_split_all(funcs):
+ if n_ands() < best_size:
+ best_size = n_ands()
+ m_best = i
+ abc('w %s_best_aig.aig'%f_name)
+ abc('r %s_best_aig.aig'%f_name)
+ return m_best,res
+
+
def fork_last(funcs,mtds):
""" fork the functions, and take first definitive answer, but
if last method ends first, then kill others"""
@@ -3938,7 +4170,9 @@ def fork_last(funcs,mtds):
y = time.time()
lst = ''
print mtds
+ #print 'starting fork_last'
for i,res in pyabc_split.abc_split_all(funcs):
+ #print i,res
status = prob_status()
if not status == -1: #solved here
m = i
@@ -3955,6 +4189,7 @@ def fork_last(funcs,mtds):
ps()
break
elif mtds[i] == 'sleep':
+ res = Undecided
t = time.time()-y
print 'sleep timer expired in %0.2f'%t
break
@@ -4011,226 +4246,226 @@ def super_pre_simp():
#______________________________
#new synthesis command
-def synculate(t):
- """
- Finds candidate sequential equivalences and refines them by simulation, BMC, or reachability
- using any cex found. If any are proved, then they are used to reduce the circuit. The final aig
- is a new synthesized circuit where all the proved equivalences are merged.
- If we put this in a loop with increasing verify times, then each time we work with a simpler model
- and new equivalences. Should approach srm. If in a loop, we can remember the cex_list so that we don't
- have to deal with disproved equivalences. Then use refine_with_cexs to trim the initial equivalences.
- If used in synthesis, need to distinguish between
- original outputs and new ones. Things to take care of: 1. a PO should not go away until it has been processes by merged_proved_equivalences
- 2. Note that &resim does not use the -m option where as in speculation - m is used. It means that if
- an original PO isfound to be SAT, the computation quits becasue one of the output
- miters has been disproved.
- """
- global G_C,G_T,n_pos_before, x_factor, n_latches_before, last_verify_time, f_name,cex_list, max_verify_time
-
-
- def refine_with_cexs():
- """Refines the gores file to reflect equivalences that go away because of cexs in cex_list"""
- global f_name
- abc('&r %s_gores.aig'%f_name)
- for j in range(len(cex_list)):
- cex_put(cex_list[j])
- run_command('&resim') #put the jth cex into the cex space and use it to refine the equivs
- abc('&w %s_gores.aig'%f_name)
- return
-
- def generate_srms():
- """generates a synthesized reduced model (srms) from the gores file"""
- global f_name, po_map
- abc('&r %s_gores.aig; &srm -sf; r gsrms.aig; w %s_gsrms.aig'%(f_name,f_name))
- print 'New srms = ',ps()
- po_map = range(n_pos())
- return 'OK'
-
- def merge_proved_equivalences():
- #this only changes the gores file.
- run_command('&r %s_gores.aig; &equiv_mark -vf %s_gsrms.aig; &reduce -v; &w %s_gores.aig'%(f_name,f_name,f_name))
- return
-
- def generate_equivalences():
- set_globals()
- t = max(1,.5*G_T)
- r = max(1,int(t))
- cmd = "&get; &equiv2 -C %d -F 200 -T %f -S 1 -R %d"%((G_C),t,r)
- abc(cmd)
- #run_command('&ps')
- eq_simulate(.5*t)
- #run_command('&ps')
- cmd = '&semi -W 63 -S 5 -C 500 -F 20 -T %d'%(.5*t)
- abc(cmd)
- #run_command('&ps')
- run_command('&w %s_gores.aig'%f_name)
-
- remove_0_pos() #makes sure no 0 pos to start
- cex_list = []
- n_pos_before = n_pos()
- n_latches_before = n_latches()
-## print 'Generating equivalences'
- generate_equivalences()
-## print 'Generating srms file'
- generate_srms() #this should not create new 0 pos
-## if n_pos()>100:
-## removed
- remove_0_pos()
- n_pos_last = n_pos()
- if n_pos_before == n_pos():
- print 'No equivalences found. Quitting synculate'
- return Undecided_no_reduction
- print 'Initial synculation: ',ps()
-## ps()
- set_globals()
- simp_sw = init = True
- simp_sw = False #temporary
- print '\nIterating synculation refinement'
- abc('w initial_sync.aig')
- max_verify_time = t
- print 'max_verify_time = %d'%max_verify_time
- """
- in the following loop we increase max_verify_time by twice time spent to find last cexs or Unsat's
- We iterate only when we have proved cex + unsat > 1/2 n_pos. Then we update srms and repeat.
- """
- while True: # refinement loop
- t = max_verify_time #this may have been increased since the last loop
-## print 'max_verify_time = %d'%max_verify_time
- set_globals()
- if not init:
- generate_srms() #generates a new gsrms file and leaves it in workspace
-## print 'generate_srms done'
- if n_pos() == n_pos_before:
- break
- if n_pos() == n_pos_last: #if nothing new, then quit if max_verification time is reached.
- if t > max_verify_time:
- break
- if simp_sw: #Warning: If this holds then simplify could create some 0 pos
- na = n_ands()
- simplify()
- while True:
- npo = n_pos()
-## print 'npos = %d'%npo
- merge_proved_equivalences() #So we need to merge them here. Can merging create more???
- generate_srms()
- if npo == n_pos():
- break
- if n_ands() > .7*na: #if not significant reduction, stop simplification
- simp_sw = False #simplify only once.
- if n_latches() == 0:
- return check_sat()
- n_pos_last = n_pos()
- init = False # make it so that next time it is not the first time through
- syn_par(t)
- if (len(cex_list)+len(result)) == 0: #nothing happened aand ran out of time.
- break
- abc('w %s_gsrms.aig'%f_name)
- #print 'No. of cexs after syn_parallel = %d'%len(cex_list)
- merge_proved_equivalences() #changes the underlying gores file by merging fanouts of proved eqs
- #print 'merge done'
- refine_with_cexs() #changes the gores file by refining the equivalences in it using cex_list.
- #print 'refine_with_cexs done'
- continue
- extract(0,n_pos_before) #get rid of unproved outputs
- return
-
-def syn_par(t):
- """prove n outputs at once and quit at first cex. Otherwise if no cex found return aig
- with the unproved outputs"""
- global trim_allowed,max_verify_time, n_pos_before
- global cex_list, result
- b_time = time.time()
- n = n_pos()
- if n == n_pos_before:
- return
- mx = n_pos()
- if n_pos() - n_pos_before > 50:
- mx = n_pos_before + 50
- r = range(n_pos_before, mx)
- N = max(1,(mx-n_pos_before)/2)
- abc('w %s__ysavetemp.aig'%f_name)
- F = [eval(FUNCS[18])] #create a timer function
- #print r
- for i in r:
- F = F + [eval('(pyabc_split.defer(verify_only)(%d,%d))'%(i,t))]
- cex_list = result = []
- outcome = ''
- #redirect printout here
-## with redirect.redirect( redirect.null_file, sys.stdout ):
-## with redirect.redirect( redirect.null_file, sys.stderr ):
- for i,res in pyabc_split.abc_split_all(F):
- status = get_status()
-## print i
- if i == 0: #timed out
- outcome = 'time expired after = %d'%(time.time() - b_time)
- break
- if status < Unsat:
- cex_list = cex_list + [cex_get()]
- if status == Unsat:
- result = result + [r[i-1]]
- if (len(result)+len(cex_list))>= N:
- T = time.time() - b_time
- if T > max_verify_time/2:
- max_verify_time = 2*T
- break
- continue
- if not outcome == '':
- print outcome
-## print 'cex_list,prove_list = ',cex_list,result
- abc('r %s__ysavetemp.aig'%f_name) #restore initial aig so that pos can be 0'ed out
- if not result == []: # found some unsat's
-## min_r = min(result)
-## max_r = max(result)
-## no = n_pos()
-## assert (0 <= min_r and max_r < no), 'min_r, max_r, length = %d, %d, %d'%(min_r,max_r,len(result))
- zero(result)
- return
- #print "Number PO's proved = %d"%len(result)
-
-def absec(n):
- #abc('w t.aig')
- for j in range(n):
- print '\nFrame %d'%(j+1)
- run_command('absec -F %d'%(j+1))
- if is_unsat():
- print 'UNSAT'
- break
-
-
-"""
- we might be proving some original pos as we go, and on the other hand we might have some equivalences that we
- can't prove. There are two uses, in verification
- verification - we want to remove the proved pos whether they are original or not. But if a cex for an original, then need to
- remember this.
- synthesis - the original outputs need to be kept and ignored in terms of cex's - supposedly they can't be proved.
-"""
-
-""" Experimental"""
-
-def csec():
- global f_name
- if os.path.exists('%s_part0.aig'%f_name):
- os.remove('%s_part0.aig'%f_name)
- run_command('demiter')
- if not os.path.exists('%s_part0.aig'%f_name):
- return
- run_command('r %s_part0.aig'%f_name)
- ps()
- run_command('comb')
- ps()
- abc('w %s_part0comb.aig'%f_name)
- run_command('r %s_part1.aig'%f_name)
- ps()
- run_command('comb')
- ps()
- abc('w %s_part1comb.aig'%f_name)
- run_command('&get; &cec %s_part0comb.aig'%(f_name))
- if is_sat():
- return 'SAT'
- if is_unsat():
- return 'UNSAT'
- else:
- return 'UNDECIDED'
+####def synculate(t):
+#### """
+#### Finds candidate sequential equivalences and refines them by simulation, BMC, or reachability
+#### using any cex found. If any are proved, then they are used to reduce the circuit. The final aig
+#### is a new synthesized circuit where all the proved equivalences are merged.
+#### If we put this in a loop with increasing verify times, then each time we work with a simpler model
+#### and new equivalences. Should approach srm. If in a loop, we can remember the cex_list so that we don't
+#### have to deal with disproved equivalences. Then use refine_with_cexs to trim the initial equivalences.
+#### If used in synthesis, need to distinguish between
+####    original outputs and new ones. Things to take care of: 1. a PO should not go away until it has been processed by merged_proved_equivalences
+####    2. Note that &resim does not use the -m option whereas in speculation - m is used. It means that if
+####    an original PO is found to be SAT, the computation quits because one of the output
+#### miters has been disproved.
+#### """
+#### global G_C,G_T,n_pos_before, x_factor, n_latches_before, last_verify_time, f_name,cex_list, max_verify_time
+####
+####
+#### def refine_with_cexs():
+#### """Refines the gores file to reflect equivalences that go away because of cexs in cex_list"""
+#### global f_name
+#### abc('&r %s_gores.aig'%f_name)
+#### for j in range(len(cex_list)):
+#### cex_put(cex_list[j])
+#### run_command('&resim') #put the jth cex into the cex space and use it to refine the equivs
+#### abc('&w %s_gores.aig'%f_name)
+#### return
+####
+#### def generate_srms():
+#### """generates a synthesized reduced model (srms) from the gores file"""
+#### global f_name, po_map
+#### abc('&r %s_gores.aig; &srm -sf; r gsrms.aig; w %s_gsrms.aig'%(f_name,f_name))
+#### print 'New srms = ',ps()
+#### po_map = range(n_pos())
+#### return 'OK'
+####
+#### def merge_proved_equivalences():
+#### #this only changes the gores file.
+#### run_command('&r %s_gores.aig; &equiv_mark -vf %s_gsrms.aig; &reduce -v; &w %s_gores.aig'%(f_name,f_name,f_name))
+#### return
+####
+#### def generate_equivalences():
+#### set_globals()
+#### t = max(1,.5*G_T)
+#### r = max(1,int(t))
+#### cmd = "&get; &equiv2 -C %d -F 200 -T %f -S 1 -R %d"%((G_C),t,r)
+#### abc(cmd)
+#### #run_command('&ps')
+#### eq_simulate(.5*t)
+#### #run_command('&ps')
+#### cmd = '&semi -W 63 -S 5 -C 500 -F 20 -T %d'%(.5*t)
+#### abc(cmd)
+#### #run_command('&ps')
+#### run_command('&w %s_gores.aig'%f_name)
+####
+#### l=remove_const_pos() #makes sure no 0 pos to start
+#### cex_list = []
+#### n_pos_before = n_pos()
+#### n_latches_before = n_latches()
+###### print 'Generating equivalences'
+#### generate_equivalences()
+###### print 'Generating srms file'
+#### generate_srms() #this should not create new 0 pos
+###### if n_pos()>100:
+###### removed
+#### l=remove_const_pos()
+#### n_pos_last = n_pos()
+#### if n_pos_before == n_pos():
+#### print 'No equivalences found. Quitting synculate'
+#### return Undecided_no_reduction
+#### print 'Initial synculation: ',ps()
+###### ps()
+#### set_globals()
+#### simp_sw = init = True
+#### simp_sw = False #temporary
+#### print '\nIterating synculation refinement'
+#### abc('w initial_sync.aig')
+#### max_verify_time = t
+#### print 'max_verify_time = %d'%max_verify_time
+#### """
+#### in the following loop we increase max_verify_time by twice time spent to find last cexs or Unsat's
+#### We iterate only when we have proved cex + unsat > 1/2 n_pos. Then we update srms and repeat.
+#### """
+#### while True: # refinement loop
+#### t = max_verify_time #this may have been increased since the last loop
+###### print 'max_verify_time = %d'%max_verify_time
+#### set_globals()
+#### if not init:
+#### generate_srms() #generates a new gsrms file and leaves it in workspace
+###### print 'generate_srms done'
+#### if n_pos() == n_pos_before:
+#### break
+#### if n_pos() == n_pos_last: #if nothing new, then quit if max_verification time is reached.
+#### if t > max_verify_time:
+#### break
+#### if simp_sw: #Warning: If this holds then simplify could create some 0 pos
+#### na = n_ands()
+#### simplify()
+#### while True:
+#### npo = n_pos()
+###### print 'npos = %d'%npo
+#### merge_proved_equivalences() #So we need to merge them here. Can merging create more???
+#### generate_srms()
+#### if npo == n_pos():
+#### break
+#### if n_ands() > .7*na: #if not significant reduction, stop simplification
+#### simp_sw = False #simplify only once.
+#### if n_latches() == 0:
+#### return check_sat()
+#### n_pos_last = n_pos()
+#### init = False # make it so that next time it is not the first time through
+#### syn_par(t)
+####        if (len(cex_list)+len(result)) == 0: #nothing happened and ran out of time.
+#### break
+#### abc('w %s_gsrms.aig'%f_name)
+#### #print 'No. of cexs after syn_parallel = %d'%len(cex_list)
+#### merge_proved_equivalences() #changes the underlying gores file by merging fanouts of proved eqs
+#### #print 'merge done'
+#### refine_with_cexs() #changes the gores file by refining the equivalences in it using cex_list.
+#### #print 'refine_with_cexs done'
+#### continue
+#### extract(0,n_pos_before) #get rid of unproved outputs
+#### return
+####
+####def syn_par(t):
+#### """prove n outputs at once and quit at first cex. Otherwise if no cex found return aig
+#### with the unproved outputs"""
+#### global trim_allowed,max_verify_time, n_pos_before
+#### global cex_list, result
+#### b_time = time.time()
+#### n = n_pos()
+#### if n == n_pos_before:
+#### return
+#### mx = n_pos()
+#### if n_pos() - n_pos_before > 50:
+#### mx = n_pos_before + 50
+#### r = range(n_pos_before, mx)
+#### N = max(1,(mx-n_pos_before)/2)
+#### abc('w %s__ysavetemp.aig'%f_name)
+#### F = [eval(FUNCS[18])] #create a timer function
+#### #print r
+#### for i in r:
+#### F = F + [eval('(pyabc_split.defer(verify_only)(%d,%d))'%(i,t))]
+#### cex_list = result = []
+#### outcome = ''
+#### #redirect printout here
+###### with redirect.redirect( redirect.null_file, sys.stdout ):
+###### with redirect.redirect( redirect.null_file, sys.stderr ):
+#### for i,res in pyabc_split.abc_split_all(F):
+#### status = get_status()
+###### print i
+#### if i == 0: #timed out
+#### outcome = 'time expired after = %d'%(time.time() - b_time)
+#### break
+#### if status < Unsat:
+#### cex_list = cex_list + [cex_get()]
+#### if status == Unsat:
+#### result = result + [r[i-1]]
+#### if (len(result)+len(cex_list))>= N:
+#### T = time.time() - b_time
+#### if T > max_verify_time/2:
+#### max_verify_time = 2*T
+#### break
+#### continue
+#### if not outcome == '':
+#### print outcome
+###### print 'cex_list,prove_list = ',cex_list,result
+#### abc('r %s__ysavetemp.aig'%f_name) #restore initial aig so that pos can be 0'ed out
+#### if not result == []: # found some unsat's
+###### min_r = min(result)
+###### max_r = max(result)
+###### no = n_pos()
+###### assert (0 <= min_r and max_r < no), 'min_r, max_r, length = %d, %d, %d'%(min_r,max_r,len(result))
+#### zero(result)
+#### return
+#### #print "Number PO's proved = %d"%len(result)
+####
+####def absec(n):
+#### #abc('w t.aig')
+#### for j in range(n):
+#### print '\nFrame %d'%(j+1)
+#### run_command('absec -F %d'%(j+1))
+#### if is_unsat():
+#### print 'UNSAT'
+#### break
+####
+####
+####"""
+#### we might be proving some original pos as we go, and on the other hand we might have some equivalences that we
+#### can't prove. There are two uses, in verification
+#### verification - we want to remove the proved pos whether they are original or not. But if a cex for an original, then need to
+#### remember this.
+#### synthesis - the original outputs need to be kept and ignored in terms of cex's - supposedly they can't be proved.
+####"""
+####
+####""" Experimental"""
+####
+####def csec():
+#### global f_name
+#### if os.path.exists('%s_part0.aig'%f_name):
+#### os.remove('%s_part0.aig'%f_name)
+#### run_command('demiter')
+#### if not os.path.exists('%s_part0.aig'%f_name):
+#### return
+#### run_command('r %s_part0.aig'%f_name)
+#### ps()
+#### run_command('comb')
+#### ps()
+#### abc('w %s_part0comb.aig'%f_name)
+#### run_command('r %s_part1.aig'%f_name)
+#### ps()
+#### run_command('comb')
+#### ps()
+#### abc('w %s_part1comb.aig'%f_name)
+#### run_command('&get; &cec %s_part0comb.aig'%(f_name))
+#### if is_sat():
+#### return 'SAT'
+#### if is_unsat():
+#### return 'UNSAT'
+#### else:
+#### return 'UNDECIDED'
###########################
#### we will verify outputs ORed in groups of g[i]
@@ -4251,20 +4486,809 @@ def csec():
#### print G
###########################
-def sop_balance(k):
- abc('st; if -K %d;ps'%k)
- for i in range(2):
- run_command('st; if -K %d;ps'%k)
- run_command('st; if g -C %d -K %d;ps'%(k+4,k+4))
- for i in range(3):
- run_command('st;&get; &dch; &put; if -K %d;ps'%k)
-def map_lut_dch(k):
- for i in range(5):
- run_command('st;if -a -K %d; ps; st; dch; ps; if -a -K %d; ps; mfs ;ps; lutpack; ps'%(k,k))
+""" These commands map into luts and leave the result in mapped format. To return to aig format, you
+have to do 'st'
+"""
+def sop_balance(k=4):
+ '''minimizes LUT logic levels '''
+## kmax = k
+ kmax=min(k+2,15)
+ abc('st; if -K %d;ps'%kmax)
+ print nl(),
+## for i in range(1):
+## abc('st; if -K %d;ps'%kmax)
+## run_command('ps')
+ kmax=min(k+2,15)
+ abc('st; if -g -C %d -K %d -F 2;ps'%(10,kmax)) #balance
+ print nl(),
+ for i in range(1):
+ abc('st;dch; if -C %d -K %d;ps'%(10,kmax))
+ print nl(),
+
+def speedup(k=4):
+ run_command('speedup;if -K %d'%k)
+ print nl()
+
+def speed_tradeoff(k=4):
+ print nl(),
+ best = n_nodes()
+ abc('write_blif %s_bestsp.blif'%f_name)
+ L_init = n_levels()
+ while True:
+ L_old = n_levels()
+ L = L_old -1
+ abc('speedup;if -D %d -F 2 -K %d -C 11'%(L,k))
+ if n_nodes() < best:
+ best = n_nodes()
+ abc('write_blif %s_bestsp.blif'%f_name)
+ if n_levels() == L_old:
+ break
+ print nl(),
+ continue
+ abc('r %s_bestsp.blif'%f_name)
+ return
+
+def map_lut_dch(k=4):
+ '''minimizes area '''
+ abc('st; dch; if -a -F 2 -K %d -C 11; mfs -a -L 50 ; lutpack -L 50'%k)
-def map_lut(k):
+def map_lut_dch_iter(k=8):
+## print 'entering map_lut_dch_iter with k = %d'%k
+ best = n_nodes()
+ abc('write_blif %s_best.blif'%f_name)
+## abc('st;dch;if -a -K %d -F 2 -C 11; mfs -a -L 1000; lutpack -L 1000'%k)
+## if n_nodes() < best:
+## abc('write_blif %s_best.blif'%f_name)
+## best = n_nodes()
+## print nl(),
+## else:
+## abc('r %s_best.blif'%f_name)
+## best = n_nodes()
+## abc('write_blif %s_best.blif'%f_name)
+## print 'best = %d'%best
+ n=0
+ while True:
+ map_lut_dch(k)
+ if n_nodes()< best:
+ best = n_nodes()
+## print 'best=%d'%best
+ n = 0
+ abc('write_blif %s_best.blif'%f_name)
+ print nl(),
+ continue
+ else:
+ n = n+1
+ if n>2:
+ break
+ abc('r %s_best.blif'%f_name)
+
+def dmitri_iter(k=8):
+ best = 100000
+ n=0
+ while True:
+ dmitri(k)
+ if n_nodes()< best:
+ best = n_nodes()
+## print '\nbest=%d'%best
+ n = 0
+ abc('write_blif %s_best.blif'%f_name)
+ continue
+ else:
+ n = n+1
+ if n>2:
+ break
+ abc('r %s_best.blif'%f_name)
+## run_command('cec -n %s.aig'%f_name)
+ print nl()
+
+
+def map_lut(k=4):
+ '''minimizes edge count'''
for i in range(5):
- run_command('st; if -e -K %d; ps; mfs ;ps; lutpack -L 50; ps'%(k))
+ abc('st; if -e -K %d; ps; mfs ;ps; lutpack -L 50; ps'%(k))
+ print nl(),
+
+def extractax(o=''):
+ abc('extract -%s'%o)
+
+def nl():
+ return [n_nodes(),n_levels()]
+
+def dc2_iter(th=.999):
+ abc('st')
+ while True:
+ na=n_ands()
+ abc('dc2')
+ print n_ands(),
+## print nl(),
+ if n_ands() > th*na:
+ break
+## print n_ands()
+
+def adc2_iter(th=.999):
+ abc('st;&get')
+ while True:
+ na=n_ands()
+ abc('&dc2;&put')
+## print n_ands(),
+ if n_ands() > th*na:
+ break
+ print n_ands()
+
+def try_extract():
+## abc('dc2;dc2')
+ print 'Initial: ',
+ ps()
+ na = n_ands()
+## abc('w %s_savetemp.aig'%f_name)
+ #no need to save initial aig since fork_best will return initial if best.
+ J = [32,33]
+ mtds = sublist(methods,J)
+ F = create_funcs(J,0)
+ (m,result) = take_best(F,mtds) #FORK here
+ if not m == -1:
+ print 'Best extract is %s: '%mtds[m],
+ ps()
+## if (n_ands() < na):
+## return
+## else:
+## abc('r %s_savetemp.aig'%f_name)
+
+def speedup_iter(k=8):
+ abc('st;if -K %d'%k)
+ run_command('ps')
+ abc('write_blif %s_bests.blif'%f_name)
+ run_command('ps')
+ best = n_levels()
+ print 'n_levels before speedup = %d'%n_levels()
+ n=0
+ while True:
+ nl()
+ abc('speedup;if -K %d'%k)
+ if n_levels() < best:
+ best = n_levels()
+ abc('write_blif %s_bests.blif'%f_name)
+ n=0
+ else:
+ n = n+1
+ if n>2:
+ break
+ abc('r %s_bests.blif'%f_name)
+ print 'n_levels = %d'%n_levels()
+
+def jog(n=16):
+ """ applies to a mapped blif file"""
+ run_command('eliminate -N %d;fx'%n)
+ run_command('if -K %d'%(n/2))
+ run_command('fx')
+
+def perturb_f(k=4):
+ abc('st;dch;if -g -K %d'%(k))
+## snap()
+ abc('speedup;if -K %d -C 10'%(k))
+ jog(5*k)
+## snap()
+## abc('if -a -K %d -C 11 -F 2;mfs -a -L 50;lutpack -L 50'%k
+
+def perturb(k=4):
+ abc('st;dch;if -g -K %d'%k)
+## snap()
+ abc('speedup;if -K %d -C 10'%(k))
+def preprocess(k=4):
+ n_initial = n_nodes()
+ abc('write_blif %s_temp_initial.blif'%f_name)
+## abc('st;dc2')
+ abc('w %s_temp_initial.aig'%f_name)
+ ni = n_pis() + n_latches()
+ res = 1
+ if ni >= 101:
+ abc('st;if -a -F 2 -K %d'%k)
+ return res
+## dc2_iter()
+ abc('st;if -a -K %d'%k) # to get plain direct map
+ if n_nodes() > n_initial:
+ abc('r %s_temp_initial.blif'%f_name)
+ res = 1
+ #plain
+ n_plain = n_nodes()
+## print nl()
+ abc('write_blif %s_temp_plain.blif'%f_name)
+ #clp
+ abc('st;clp; if -a -K %d'%k)
+## print nl()
+ abc('write_blif %s_temp_clp.blif'%f_name)
+ n_clp = n_nodes()
+ #clp_lutmin
+ abc('r %s_temp_initial.blif'%f_name)
+ abc('st;clp;lutmin -K %d;'%k)
+ abc('write_blif %s_temp_clp_lut.blif'%f_name)
+ n_clp_lut = n_nodes()
+## print nl()
+ if n_plain <= min(n_clp,n_clp_lut):
+ abc('r %s_temp_plain.blif'%f_name)
+ res = 1
+ elif n_clp < n_clp_lut:
+ abc('r %s_temp_clp.blif'%f_name)
+ res = 1
+ else:
+ abc('r %s_temp_clp_lut.blif'%f_name)
+ res = 1
+## print nl()
+ return res
+
+def snap():
+## abc('fraig;fraig_store')
+ abc('fraig_store')
+
+def snap_bestk(k):
+ abc('write_blif %s_temp.blif'%f_name)
+ unsave_bestk(k)
+ snap()
+ abc('r %s_temp.blif'%f_name)
+
+def cec_it():
+ """ done because &r changes the names. Can't use -n because rfraig_store reorders pis and pos."""
+ abc('write_blif %s_temp.blif'%f_name)
+ abc('&r %s.aig;&put'%f_name)
+ run_command('cec %s_temp.blif'%f_name)
+ abc('r %s_temp.blif'%f_name)
+
+def save_bestk(b,k):
+## if os.access('%s_best%d.blif'%(f_name,k),os.R_OK):
+## res = get_bestk(k)
+## else:
+ """ saves the best, returns bestk and if not best, leaves blif unchanged"""
+ res = b
+ if n_nodes() < res:
+ res = n_nodes()
+ abc('write_blif %s_best%d.blif'%(f_name,k))
+ print 'best%d = %d'%(k,res)
+ return res
+## unsave_bestk(k)
+
+def unsave_bestk(k):
+ abc('r %s_best%d.blif'%(f_name,k))
+
+def unsnap(k=4):
+## snap()
+ abc('fraig_restore')
+ map_lut_dch(k)
+## abc('fraig_restore;if -a -F 2 -C 11 -K %d'%k)
+
+def map_until_conv(k=4):
+ kk = 2*k
+ # make sure that no residual results are left over.
+ if os.access('%s_best%d.blif'%(f_name,k),os.R_OK):
+ os.remove('%s_best%d.blif'%(f_name,k))
+ if os.access('%s_best%d.blif'%(f_name,kk),os.R_OK):
+ os.remove('%s_best%d.blif'%(f_name,kk))
+ tt = time.time()
+ #get initial map and save
+ map_lut_dch(k)
+ bestk = save_bestk(100000,k)
+ print nl()
+## snap()
+ res = preprocess() #get best of initial, clp, and lutmin versions
+ print nl()
+## map_lut_dch(k)
+## ###
+## bestk = save_bestk(bestk,k)
+## map_iter(k)
+## bestk = save_bestk(bestk,k)
+## ###
+ map_lut_dch_iter(kk) #initialize with mapping with 2k input LUTs
+ bestkk = save_bestk(100000,kk)
+ snap()
+ unsnap(k) #have to do snap first if want current result snapped in.
+ # unsnap fraigs snapshots and does map_lut_dch at end
+ print nl()
+ bestk = save_bestk(bestk,k)
+ abc('r %s_bestk%d.blif'%(f_name,k))
+ map_iter(k) #1
+ bestk = save_bestk(bestk,k)
+ while True:
+ print 'Perturbing with %d-Lut'%kk
+## snap()
+ map_lut_dch_iter(kk)
+## snap()
+ bestkk_old = bestkk
+ bestkk = save_bestk(bestkk,kk)
+ if bestkk >= bestkk_old:
+ break
+## snap()
+## jog(kk)
+## perturb_f(k)
+## snap()
+## perturb_f(k)
+## snap()
+## unsave_bestk(k)
+## map_lut_dch(k+1)
+## snap()
+## snap_bestk(k)
+ snap()
+ unsnap(k) #fraig restore and map
+## bestk = save_bestk(bestk,k)
+## snap()
+ bestk_old = bestk
+ map_iter(k)
+ bestk = save_bestk(bestk,k)
+ if bestk >= bestk_old:
+ break
+ continue
+ abc('fraig_restore') #dump what is left in fraig_store
+ unsave_bestk(k)
+ print '\nFinal size = ',
+ print nl()
+ print 'time for %s = %.02f'%(f_name,(time.time()-tt))
+## cec_it()
+
def get_bestk(k=4):
    """Return the node count of the saved best k-LUT mapping, leaving the
    working network unchanged."""
    abc('write_blif %s_temp.blif'%f_name)   # park the working network
    unsave_bestk(k)
    best_size = n_nodes()
    abc('r %s_temp.blif'%f_name)            # restore it
    return best_size
+
+def map_iter(k=4):
+ tt = time.time()
+ bestk = get_bestk(k)
+## bestk = n_nodes()
+## bestk = save_bestk(bestk,k)
+## abc('st;dch;if -a -F 2 -K %d -C 11; mfs -a -L 1000; lutpack -L 1000'%k)#should be same as Initial
+## map_lut_dch_iter(k) ####
+ map_lut_dch(k)
+ bestk = save_bestk(bestk,k)
+ n=0
+ unsave_bestk(k)
+ while True:
+## snap()
+ perturb(k) #
+## snap()
+ perturb(k)
+## snap_bestk(k)
+## unsnap(k)
+## bestk = save_bestk(bestk,k)
+## snap()
+## map_lut_dch(k+1)
+## abc('if -K %d'%(k+1))
+## snap()
+## unsnap(k)
+ old_bestk = bestk
+## print old_bestk
+ map_lut_dch_iter(k)
+ bestk = save_bestk(bestk,k)
+ print bestk
+ if bestk < old_bestk:
+ n=0 # keep it up
+ continue
+ elif n == 2: #perturb
+ break
+ else:
+ n = n+1
+ print '%d-perturb'%n
+## snap()
+## unsave_bestk(k)
+ unsave_bestk(k)
+
+def map_star(k=4):
+ tt = time.time()
+ map_until_conv(k)
+ abc('write_blif %s_best_star.blif'%f_name)
+ best = n_nodes()
+ while True:
+ jog(2*k)
+ map_until_conv(k)
+ if n_nodes() >= best:
+ break
+ else:
+ best = n_nodes()
+ abc('write_blif %s_best_star.blif'%f_name)
+ abc('r %s_best_star.blif'%f_name)
+ print 'SIZE = %d, TIME = %.2f for %s'%(n_nodes(),(time.time() - tt),f_name)
+
def decomp_444():
    """Map into 10-LUTs and decompose each into a 4-4-4 LUT structure,
    reloading the decomposed network."""
    abc('st; dch; if -K 10 -S 444')
    abc('write_blif -S 444 %s_temp.blif; r %s_temp.blif'%(f_name,f_name))
+
def dmitri(k=8):
    """Delay-oriented k-LUT flow: SOP-balancing map, re-map with -G,
    expand into cubes, add buffers, and print the resulting size.

    The commented-out lines below are the experiment history (dc2
    iteration, speedup, CEC cross-checks) kept for reference.
    """
##    abc('w t.aig')
##    dc2_iter()
##    print 'first iter done: %d'%n_ands()
##    abc('dc2rs')
####    dc2_iter()
##    print 'second iter done: %d'%n_ands()
##    sop_balance(k)
##    abc('w t_before.aig')
##    run_command('cec -n t.aig')
##    speedup_iter(k)
##    print 'n_levels after speedup = %d'%n_levels()
##    abc('write_blif %s_save.blif'%f_name)
##    nn=n_levels()
    # SOP-balancing mapper (-g) into k-LUTs
    abc('st;dch; if -g -K %d'%(k))
##    print 'n_levels after sop balance = %d'%n_levels()
##    if n_levels() > nn:
##        run_command('r %s_save.blif'%f_name)
##        print 'n_levels = %d'%n_levels()
##    print 'final n_levels = %d'%n_levels()
##    print 'sop_balance done: ',
##    print nl()
##    run_command('st;w t_after.aig')
##    run_command('cec -n t.aig')
    abc('if -G %d '%k)
##    print 'after if -G %d: '%k,
##    print nl()
##    run_command('cec -n t.aig')
    abc('cubes')
##    print 'after cubes: ',
##    print nl()
##    run_command('cec -n t.aig')
    abc('addbuffs -v')
##    print 'after addbuffs: ',
    print nl(),
##    run_command('cec -n t.aig')
+
+def lut():
+ dc2_iter()
+ abc('extract -a')
+ print nl()
+ dc2_iter()
+## ps()
+ sop_balance(6)
+ map_lut_dch()
+ map_lut()
+ print nl()
+## run_command('ps')
+
################################## gate level abstraction
# Code for gate-level abstraction.
# NOTE(review): this section note was originally an indented bare string
# literal ("Code for using / for abstraction"), which is invalid at module
# level in Python; rewritten as comments without losing the original note.
+
+def bip_abs(t=100):
+ """ t is ignored here"""
+ set_globals()
+ time = max(1,.1*G_T)
+ abc('&get;,bmc -vt=%f'%time)
+ set_max_bmc(bmc_depth())
+ c = 2*G_C
+ f = max(2*max_bmc,20)
+ b = min(max(10,max_bmc),200)
+ t1 = x_factor*max(1,2*G_T)
+ t = max(t1,t)
+ s = min(max(3,c/30000),10) # stability between 3 and 10
+## cmd = '&get;,abs -bob=%d -stable=%d -timeout=%d -vt=%d -depth=%d -dwr=vabs'%(b,s,t,t,f)
+ cmd = '&get;,abs -timeout=%d -vt=%d -dwr=%s_vabs'%(t,t,f_name)
+ print 'Running %s'%cmd
+## abc(cmd)
+ run_command(cmd)
+ bmc_depth()
+ abc('&w %s_greg.aig'%f_name)
+ return max_bmc
+
def check_frames():
    """Read the vta status file and return the number of BMC frames reached."""
    abc('read_status vta.status')
    return n_bmc_frames()
+
def gate_abs(t):
    """Gate-level abstraction with &vta, then derive the abstracted network.

    t is passed as the -T timeout (seconds).  NOTE(review): the old
    docstring said 'for F frames', but no frame bound is given here —
    presumably a stale copy; confirm against &vta's options.
    """
    # -R: stop when this percentage of the original design remains
    r = 100 *(1 - abs_ratio)
    run_command('orpos; &get;&vta -dv -A %s_vabs.aig -P 2 -T %d -R %d; &vta_gla;&gla_derive; &put'%(f_name,t,r))
##    write_file('abs')
+
def gla_abs(t):
    """Gate-level abstraction with &gla_cba, then derive the abstracted network.

    t is passed as the -T timeout (seconds); -F 0 leaves the frame count
    unbounded.  NOTE(review): the old docstring said 'for F frames' —
    looks copied from gate_abs; verify.
    """
    # -R: stop when this percentage of the original design remains
    r = 100 *(1 - abs_ratio)
    run_command('orpos; &get;&gla_cba -C 0 -T %d -F 0 -R %d; &gla_derive; &put'%(t,r))
+
def sizeof():
    """Return [#PIs, #POs, #latches, #AND nodes] of the current network."""
    pis = n_pis()
    pos = n_pos()
    lats = n_latches()
    ands = n_ands()
    return [pis, pos, lats, ands]
+
def abstract(ifb=2):
    """Dispatch to an abstraction strategy.

    ifb == 0: gate_abs-based flow, no bip operations.
    ifb == 1: old flow using ',abs' (must agree with global ifbip).
    otherwise: ',abs -dwr' flow via bip_abs.
    """
    global abs_ratio
    if ifb == 0:    # new way using gate_abs and no bip
        return abstracta(False)
    if ifb == 1:    # old way using ,abs
        assert ifb == ifbip, 'call to abstract has ifb not = global ifbip'
        return abstractb()
    # new way using ,abs -dwr -- (bip_abs)
    return abstracta(True)
+
def abstracta(if_bip=True):
    """
    Run abstraction in parallel with verification engines.

    if_bip = 0 it uses a new abstraction based on &vta (gate level abstraction) and no bip operations
    Right now, if we do not prove it with abstraction in the time allowed,
    we abandon abstraction and go on with speculation
    if_bip = 1, we use ,abs -dwr

    Launches the abstraction engine (bip_abs or gate_abs), a
    monitor_and_prove watcher, and extra engines via create_funcs, then
    reacts to whichever finishes first.  Returns Sat/Unsat/Sat_true or
    Undecided_no_reduction (restoring the pre-abstraction AIG in that case).
    """
    global G_C, G_T, latches_before_abs, x_factor, last_verify_time, x, win_list, j_last, sims
    global latches_before_abs, ands_before_abs, pis_before_abs, abs_ratio
##    n_vabs = 0
    latches_before_abs = n_latches()
    ands_before_abs = n_ands()
    pis_before_abs = n_real_inputs()
    tt = time.time()
    print 'using abstracta, ',
##    print 'if_bip = %d'%if_bip
##    latch_ratio = abs_ratio
##    t = 100
    t = 1000 #temporary
    t = abs_time
    if if_bip == 0:
        t = 1000 #timeout on vta
        t = abs_time
    tt = time.time()
    if n_pos() > 1 and if_bip == 0:
        abc('orpos')
        print 'POs ORed together, ',
    initial_size = sizeof()
    abc('w %s_before_abs.aig'%f_name)   # saved for restore on failure
    # 25 below means that it will quit if #FF+#ANDS > 75% of original
##    funcs = [eval("(pyabc_split.defer(abc)('orpos;&get;&vta -d -R 25'))")] #right now we need orpos
    if if_bip:
        print 'using bip_abs'
        mtds = ['bip_abs']
        funcs = [eval('(pyabc_split.defer(bip_abs)(t))')]
    else:
        print 'using gate_abs'
        mtds = ['gate_abs']
        funcs = [eval('(pyabc_split.defer(gate_abs)(t))')]
    funcs = funcs + [eval('(pyabc_split.defer(monitor_and_prove)())')]
    J = [34,30]
    if n_ands()> 500000: #if greater than this, bmc_j may take too much memory.
        J = [34]
##    J=[]
    funcs = funcs + create_funcs(J,1000)
    mtds = mtds + ['monitor_and_prove'] + sublist(methods,J)
    print 'methods = ',
    print mtds
    vta_term_by_time=0
    # index i tells which parallel function terminated first:
    # 0 = abstraction engine, 1 = monitor_and_prove, >1 = extra engines
    for i,res in pyabc_split.abc_split_all(funcs):
##        print i,res
        if i == 0: #vta ended first
            print 'time taken = %0.2f'%(time.time() - tt)
            if is_sat():
                print 'vta abstraction found cex in frame %d'%cex_frame()
                return Sat
            if is_unsat():
                print 'vta abstraction proved UNSAT'
                return Unsat
            else: #undecided
                if time.time() - tt < .95*t:
                    print 'abstraction terminated but not by timeout'
                    vta_term_by_time = 0
                    break
                else:
                    print 'abstraction terminated by a timeout of %d'%t
##                    print 'final abstraction: ',
##                    ps()
                    vta_term_by_time=1
                    break
        if i == 1: #monitor and prove ended first (sleep timed out)
##            print i,res
            if res > Unsat: #we abandon abstraction
##                print 'final abstraction: ',
##                ps()
##                print 'Trying to verify final abstraction'
##                result = verify([7,9,19,23,24,30],100) #do this if if_bip==0
##                if result == Unsat:
##                    print 'Abstraction proved valid'
##                    return result
##                else:
##                    print 'Abstract time wasted = %0.2f'%(time.time()-tt)
##                    abc('r %s_before_abs.aig'%f_name)
##                    result = Undecided_no_reduction
##                    return result
##            elif res == Undecided_no_reduction:
                print 'monitor and prove timed out or little reduction'
                abc('r %s_before_abs.aig'%f_name)
                return Undecided_no_reduction
            else:
                if not initial_size == sizeof(): #monitor and prove should not return SAT in this case'
                    assert not is_sat(), 'monitor_and_prove returned SAT on abstraction!'
                print 'time taken = %0.2f'%(time.time() - tt)
                if is_unsat():
                    return Unsat
                elif is_sat():
                    return Sat
                else:
                    abc('r %s_before_abs.aig'%f_name)
                    return Undecided_no_reduction
        else: #one of the engines got an answer
            print 'time taken = %0.2f'%(time.time() - tt)
            if is_unsat():
                print 'Initial %s proved UNSAT'%mtds[i]
                return Unsat
            if is_sat():
                print 'Initial %s proved SAT'%mtds[i]
                return Sat
            else: # an engine failed here
                print 'Initial %s terminated without result'%mtds[i]
##                return Undecided
                continue
    # fell out of the parallel loop: abstraction ended undecided
    if vta_term_by_time == 0 and if_bip == 0: #vta timed out itself
        print 'Trying to verify final abstraction',
        ps()
        result = verify([7,9,19,23,24,30],100)
        if result == Unsat:
            print 'Abstraction proved valid'
            return result
    # should do abstraction refinement here if if_bip==1
    if if_bip == 0:
        print 'abstraction no good - restoring initial simplified AIG'
        abc('r %s_before_abs.aig'%f_name)
        return Undecided_no_reduction
    else:
        if is_sat():
            print 'Found true counterexample in frame %d'%cex_frame()
            return Sat_true
        if is_unsat():
            return Unsat
##        set_max_bmc(NBF)
        NBF = bmc_depth()
        print 'Abstraction good to %d frames'%max_bmc
        #note when things are done in parallel, the &aig is not restored!!!
        abc('&r %s_greg.aig; &w initial_greg.aig; &abs_derive; &put; w initial_gabs.aig; w %s_gabs.aig'%(f_name,f_name))
        set_max_bmc(NBF)
        print 'Initial abstraction: ',
        ps()
        abc('w %s_init_abs.aig'%f_name)
        latches_after = n_latches()
##        if latches_after >= .90*latches_before_abs: #the following should match similar statement
##        if ((rel_cost_t([pis_before_abs, latches_before_abs, ands_before_abs])> -.1) or
##            (latches_after >= .75*latches_before_abs)):
        if small_abs(abs_ratio):
            abc('r %s_before_abs.aig'%f_name)
            print "Little reduction!"
            print 'Abstract time wasted = %0.2f'%(time.time()-tt)
            return Undecided_no_reduction
        sims_old = sims
        sims=sims[:1] #make it so that rarity sim is not used since it can't find a cex
        result = abstraction_refinement(latches_before_abs, NBF,abs_ratio)
        sims = sims_old
        if result <= Unsat:
            return result
##        if n_latches() >= .90*latches_before_abs:
##        if ((rel_cost_t([pis_before_abs, latches_before_abs, ands_before_abs])> -.1) or (latches_after >= .90*latches_before_abs)):
##        if rel_cost_t([pis_before_abs,latches_before_abs, ands_before_abs])> -.1:
        if small_abs(abs_ratio): #r is ratio of final to initial latches in absstraction. If greater then True
            abc('r %s_before_abs.aig'%f_name) #restore original file before abstract.
            print "Little reduction! ",
            print 'Abstract time wasted = %0.2f'%(time.time()-tt)
            result = Undecided_no_reduction
            return result
        #new
        else:
            write_file('abs') #this is only written if it was not solved and some change happened.
            print 'Abstract time = %0.2f'%(time.time()-tt)
            return result
+
def monitor_and_prove():
    """
    monitor and prove. whenever a new vabs is found, try to verify it

    Runs read_and_sleep (which watches for new abstraction files) in
    parallel with a set of verification engines.  Returns Sat/Unsat when
    an engine decides, Undecided when time runs out with a still-good
    abstraction, or Undecided_no_reduction when the abstraction is bad or
    too large (n_latches() > abs_ratio * initial latches).
    """
    global ifbip
    #write the current aig as vabs.aig so it will be regularly verified at the beginning.
##    print 'Entering monitora_and_prove'
    print ifbip
    run_command('w %s_vabs.aig'%f_name)
    if ifbip == 0:
        run_command('w vabs.aig')
    initial_size = sizeof()   # [pis,pos,latches,ands] before any abstraction
    print 'initial size = ',
    print initial_size
    funcs = [eval('(pyabc_split.defer(read_and_sleep)())')]
    t = 1000 # do not want to timeout verification engines.
    t = abs_time
    J = [9,19,23,24,34] #engines BMC3,PDRMsd,INTRPm,REACHY - engines for first time through when no abstraction
    funcs = funcs + create_funcs(J,t)
    mtds = ['read_and_sleep'] + sublist(methods,J)
    print 'methods = %s'%mtds
    #a return of Undecided means that abstraction might be good and calling routine will check this
    while True:
        time_done = abs_bad = 0
        # i = 0 is read_and_sleep; i > 0 are the verification engines
        for i,res in pyabc_split.abc_split_all(funcs):
##            print i,res
            if i == 0: # read and sleep terminated
                if res == False: #found new abstraction
                    abs_bad = 0 #new abs starts out good.
                    if not initial_size == sizeof() and n_latches() > abs_ratio * initial_size[2]:
                        return Undecided_no_reduction
                    else:
                        break
                elif res == True: # read and sleep timed out
                    time_done = 1
##                    print 'read_and_sleep timed out'
                    if abs_bad:
                        return Undecided_no_reduction
                    else: #abs is still good. Let other engines continue
                        return Undecided #calling routine handles >Unsat all the same right now.
                else:
                    assert False, 'something wrong. read and sleep did not return right thing'
            if i > 0: #got result from one of the verify engines
##                print 'method %s found SAT in frame %d'%(mtds[i],cex_frame())
                if is_unsat():
                    print 'Parallel method %s proved UNSAT on current abstraction'%mtds[i]
                    return Unsat
                if is_sat(): #abstraction is not good yet.
                    print 'Parallel method %s found SAT on current abstraction in frame %d'%(mtds[i],cex_frame())
##                    print 'n_vabs = %d'%n_vabs
                    if initial_size == sizeof():# the first time we were working on an aig before abstraction
                        return Sat
##                    print 'current abstraction invalid'
                    abs_bad = 1
                    break #this kills off other verification engines working on bad abstraction
                else: #one of the engines undecided for some reason - failed?
                    print 'Parallel %s terminated without result on current abstraction'%mtds[i]
                    continue
        # after the parallel round, decide how to proceed
        if abs_bad and not time_done: #here we wait until have a new vabs.
            print 'current abstraction bad, waiting for new one'
##            print 'waiting for new abstraction'
            abc('r %s_abs.aig'%f_name) #read in the abstraction to destroy is_sat.
            res = read_and_sleep(5) #this will check every 5 sec, until abs_time sec has passed without new abs
            if res == False: #found new vabs. Now continue if vabs small enough
##                print 'n_vabs = %d'%n_vabs
                if (not initial_size == sizeof()) and n_latches() > abs_ratio * initial_size[2]:
                    return Undecided_no_reduction
                else:
                    continue
            elif res ==True: #read_and_sleep timed out
##                print 'read_and_sleep timed out'
                return Undecided_no_reduction
            else:
                break #this should not happen
        elif abs_bad and time_done:
            print 'current abstraction bad, time has run out'
            return Undecided_no_reduction
        elif time_done: #abs is good here
            print 'current abstraction still good, time has run out'
            return Undecided #this will cause calling routine to try to verify the final abstraction
            #right now handles the same as Undecided_no_reduction-if time runs out we quit abstraction
        else: #abs good and time not done
            print 'current abstraction still good, time has not run out'
            #we want to continue but after first time, we use expanded set of engines.
            funcs = [eval('(pyabc_split.defer(read_and_sleep)())')]
            funcs = funcs + create_funcs(J,t) #use old J first time
            mtds = ['read_and_sleep'] + sublist(methods,J)
            if initial_size == sizeof():
                print 'methods = %s'%mtds
                J = [7,9,19,23,24,30] #first time reflects that 7 and 30 are already being done
            continue #will try with new vabs
+
+def read_and_sleep(t=5):
+ """
+ keep looking for a new vabs every 5 seconds. This is usually run in parallel with
+ &vta -d
+ """
+ #t is not used at present
+ tt = time.time()
+ T = 1000 #if after the last abstraction, no answer, then terminate
+ T = abs_time
+ set_size()
+ name = '%s_vabs.aig'%f_name
+## if ifbip > 0:
+## name = '%s_vabs.aig'%f_name
+#### print 'name = %s'%name
+ while True:
+ if time.time() - tt > T: #too much time between abstractions
+ print 'read_and_sleep timed out in %d sec.'%T
+ return True
+ if os.access('%s'%name,os.R_OK):
+ abc('r %s'%name)
+ abc('w %s_vabs_old.aig'%f_name)
+ os.remove('%s'%name)
+ if not check_size():
+ print '\nNew abstraction: ',
+ ps()
+ set_size()
+ abc('w %s_abs.aig'%f_name)
+ return False
+ #if same size, keep going.
+ print '.',
+ sleep(5)
+####################################################