path: root/src/bdd/reo
author     Alan Mishchenko <alanmi@berkeley.edu>   2005-07-29 08:01:00 -0700
committer  Alan Mishchenko <alanmi@berkeley.edu>   2005-07-29 08:01:00 -0700
commit     888e5bed5d7f56a5d86d91a6e8e88f3e5a3454dc (patch)
tree       11d48c9e9069f54dc300c3571ae63c744c802c50 /src/bdd/reo
parent     7f94414388cce67bd3cc1a6d6269f0ed31ed0d06 (diff)
download   abc-888e5bed5d7f56a5d86d91a6e8e88f3e5a3454dc.tar.gz
           abc-888e5bed5d7f56a5d86d91a6e8e88f3e5a3454dc.tar.bz2
           abc-888e5bed5d7f56a5d86d91a6e8e88f3e5a3454dc.zip
Version abc50729
Diffstat (limited to 'src/bdd/reo')
-rw-r--r--   src/bdd/reo/module.make       7
-rw-r--r--   src/bdd/reo/reo.h           222
-rw-r--r--   src/bdd/reo/reoApi.c        289
-rw-r--r--   src/bdd/reo/reoCore.c       438
-rw-r--r--   src/bdd/reo/reoProfile.c    365
-rw-r--r--   src/bdd/reo/reoSift.c       341
-rw-r--r--   src/bdd/reo/reoSwap.c       898
-rw-r--r--   src/bdd/reo/reoTest.c       251
-rw-r--r--   src/bdd/reo/reoTransfer.c   199
-rw-r--r--   src/bdd/reo/reoUnits.c      184
10 files changed, 3194 insertions, 0 deletions
diff --git a/src/bdd/reo/module.make b/src/bdd/reo/module.make
new file mode 100644
index 00000000..703be139
--- /dev/null
+++ b/src/bdd/reo/module.make
@@ -0,0 +1,7 @@
+SRC += bdd\reo\reoApi.c \
+ bdd\reo\reoCore.c \
+ bdd\reo\reoProfile.c \
+ bdd\reo\reoSift.c \
+ bdd\reo\reoSwap.c \
+ bdd\reo\reoTransfer.c \
+ bdd\reo\reoUnits.c
diff --git a/src/bdd/reo/reo.h b/src/bdd/reo/reo.h
new file mode 100644
index 00000000..7e4be855
--- /dev/null
+++ b/src/bdd/reo/reo.h
@@ -0,0 +1,222 @@
+/**CFile****************************************************************
+
+ FileName [reo.h]
+
+ PackageName [REO: A specialized DD reordering engine.]
+
+ Synopsis [External and internal declarations.]
+
+ Author [Alan Mishchenko]
+
+ Affiliation [UC Berkeley]
+
+ Date [Ver. 1.0. Started - October 15, 2002.]
+
+ Revision [$Id: reo.h,v 1.0 2002/15/10 03:00:00 alanmi Exp $]
+
+***********************************************************************/
+
+#ifndef __REO_H__
+#define __REO_H__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "extra.h"
+
+////////////////////////////////////////////////////////////////////////
+/// MACRO DEFINITIONS ///
+////////////////////////////////////////////////////////////////////////
+
+// reordering parameters
+#define REO_REORDER_LIMIT 1.15 // determines the quality/runtime trade-off
+#define REO_QUAL_PAR 3 // the quality [1 = simple lower bound, 2 = strict, larger = heuristic]
+// internal parameters
+#define REO_CONST_LEVEL 30000 // the number of the constant level
+#define REO_TOPREF_UNDEF 30000 // the undefined top reference
+#define REO_CHUNK_SIZE 5000 // the number of units allocated at one time
+#define REO_COST_EPSILON     0.0000001 // difference in cost large enough so that it is counted as an error
+#define REO_HIGH_VALUE 10000000 // a large value used to initialize some variables
+// interface parameters
+#define REO_ENABLE 1 // the value of the enable flag
+#define REO_DISABLE 0 // the value of the disable flag
+
+// the types of minimization currently supported
+typedef enum {
+ REO_MINIMIZE_NODES,
+ REO_MINIMIZE_WIDTH, // may not work for BDDs with complemented edges
+ REO_MINIMIZE_APL
+} reo_min_type;
+
+////////////////////////////////////////////////////////////////////////
+/// DATA STRUCTURES ///
+////////////////////////////////////////////////////////////////////////
+
+typedef struct _reo_unit reo_unit; // the unit representing one DD node during reordering
+typedef struct _reo_plane reo_plane; // the set of nodes on one level
+typedef struct _reo_hash reo_hash; // the entry in the hash table
+typedef struct _reo_man reo_man; // the reordering manager
+typedef struct _reo_test reo_test; //
+
+struct _reo_unit
+{
+ short lev; // the level of this node at the beginning
+ short TopRef; // the top level from which this node is refed (used to update BDD width)
+ short TopRefNew; // the new top level from which this node is refed (used to update BDD width)
+ short n; // the number of incoming edges (similar to ref count in the BDD)
+ int Sign; // the signature
+
+ reo_unit * pE; // the pointer to the "else" branch
+ reo_unit * pT; // the pointer to the "then" branch
+ reo_unit * Next; // the link to the next one in the list
+ double Weight; // the probability of traversing this node
+};
+
+struct _reo_plane
+{
+ int fSifted; // to mark the sifted variables
+ int statsNodes; // the number of nodes in the current level
+ int statsWidth; // the width on the current level
+ double statsApl; // the sum of node probabilities on this level
+ double statsCost; // the current cost is stored here
+ double statsCostAbove; // the total cost of the levels above this level
+ double statsCostBelow; // the total cost of the levels below this level
+
+ reo_unit * pHead; // the pointer to the beginning of the unit list
+};
+
+struct _reo_hash
+{
+ int Sign; // signature of the current cache operation
+ unsigned Arg1; // the first argument
+ unsigned Arg2; // the second argument
+ unsigned Arg3; // the third argument
+};
+
+struct _reo_man
+{
+ // these parameters can be set by the API functions
+ int fMinWidth; // the flag to enable reordering for minimum width
+ int fMinApl; // the flag to enable reordering for minimum APL
+ int fVerbose; // the verbosity level
+ int fVerify; // the flag toggling verification
+ int fRemapUp; // the flag to enable remapping
+ int nIters; // the number of iterations of sifting to perform
+
+ // parameters given by the user when reordering is called
+ DdManager * dd; // the CUDD BDD manager
+ int * pOrder; // the resulting variable order will be returned here
+
+ // derived parameters
+ int fThisIsAdd; // this flag is one if the function is the ADD
+ int * pSupp; // the support of the given function
+ int nSuppAlloc; // the max allowed number of support variables
+ int nSupp; // the number of support variables
+ int * pOrderInt; // the array storing the internal variable permutation
+ double * pVarCosts; // other arrays
+ int * pLevelOrder; // other arrays
+ reo_unit ** pWidthCofs; // temporary storage for cofactors used during reordering for width
+
+ // parameters related to cost
+ int nNodesBeg;
+ int nNodesCur;
+ int nNodesEnd;
+ int nWidthCur;
+ int nWidthBeg;
+ int nWidthEnd;
+ double nAplCur;
+ double nAplBeg;
+ double nAplEnd;
+
+ // mapping of the function into planes and back
+ int * pMapToPlanes; // the mapping of var indexes into plane levels
+ int * pMapToDdVarsOrig;// the mapping of plane levels into the original indexes
+ int * pMapToDdVarsFinal;// the mapping of plane levels into the final indexes
+
+ // the planes table
+ reo_plane * pPlanes;
+ int nPlanes;
+ reo_unit ** pTops;
+ int nTops;
+ int nTopsAlloc;
+
+ // the hash table
+ reo_hash * HTable; // the table itself
+ int nTableSize; // the size of the hash table
+ int Signature; // the signature counter
+
+ // the referenced node list
+ int nNodesMaxAlloc; // this parameter determines how much memory is allocated
+ DdNode ** pRefNodes;
+ int nRefNodes;
+ int nRefNodesAlloc;
+
+ // unit memory management
+ reo_unit * pUnitFreeList;
+ reo_unit ** pMemChunks;
+ int nMemChunks;
+ int nMemChunksAlloc;
+ int nUnitsUsed;
+
+ // statistic variables
+ int HashSuccess;
+ int HashFailure;
+ int nSwaps; // the number of swaps
+ int nNISwaps; // the number of swaps without interaction
+};
+
+// used to manipulate units
+#define Unit_Regular(u) ((reo_unit *)((unsigned long)(u) & ~01))
+#define Unit_Not(u) ((reo_unit *)((long)(u) ^ 01))
+#define Unit_NotCond(u,c) ((reo_unit *)((long)(u) ^ (c)))
+#define Unit_IsConstant(u) ((int)((u)->lev == REO_CONST_LEVEL))
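+
+// A minimal illustrative sketch of these macros (the pointers are hypothetical
+// and used only for clarity): the complement attribute of an edge is kept in
+// the least significant bit of the unit pointer.
+//
+//    reo_unit * pEdge = Unit_Not( pUnit );        // complemented edge to pUnit
+//    reo_unit * pReg  = Unit_Regular( pEdge );    // pReg == pUnit (the tag is stripped)
+//    int fCompl       = (int)( pEdge != pReg );   // 1 iff the edge is complemented
+//    if ( Unit_IsConstant( pReg ) )               // the unit sits on the constant level
+//        ...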
+
+////////////////////////////////////////////////////////////////////////
+/// FUNCTION DECLARATIONS ///
+////////////////////////////////////////////////////////////////////////
+
+// ======================= reoApi.c ========================================
+extern reo_man * Extra_ReorderInit( int nDdVarsMax, int nNodesMax );
+extern void Extra_ReorderQuit( reo_man * p );
+extern void Extra_ReorderSetMinimizationType( reo_man * p, reo_min_type fMinType );
+extern void Extra_ReorderSetRemapping( reo_man * p, int fRemapUp );
+extern void Extra_ReorderSetIterations( reo_man * p, int nIters );
+extern void Extra_ReorderSetVerbosity( reo_man * p, int fVerbose );
+extern void Extra_ReorderSetVerification( reo_man * p, int fVerify );
+extern DdNode * Extra_Reorder( reo_man * p, DdManager * dd, DdNode * Func, int * pOrder );
+extern void Extra_ReorderArray( reo_man * p, DdManager * dd, DdNode * Funcs[], DdNode * FuncsRes[], int nFuncs, int * pOrder );
+// ======================= reoCore.c =======================================
+extern void reoReorderArray( reo_man * p, DdManager * dd, DdNode * Funcs[], DdNode * FuncsRes[], int nFuncs, int * pOrder );
+extern void reoResizeStructures( reo_man * p, int nDdVarsMax, int nNodesMax, int nFuncs );
+// ======================= reoProfile.c ======================================
+extern void reoProfileNodesStart( reo_man * p );
+extern void reoProfileAplStart( reo_man * p );
+extern void reoProfileWidthStart( reo_man * p );
+extern void reoProfileWidthStart2( reo_man * p );
+extern void reoProfileAplPrint( reo_man * p );
+extern void reoProfileNodesPrint( reo_man * p );
+extern void reoProfileWidthPrint( reo_man * p );
+extern void reoProfileWidthVerifyLevel( reo_plane * pPlane, int Level );
+// ======================= reoSift.c =======================================
+extern void reoReorderSift( reo_man * p );
+// ======================= reoSwap.c =======================================
+extern double reoReorderSwapAdjacentVars( reo_man * p, int Level, int fMovingUp );
+// ======================= reoTransfer.c ===================================
+extern reo_unit * reoTransferNodesToUnits_rec( reo_man * p, DdNode * F );
+extern DdNode * reoTransferUnitsToNodes_rec( reo_man * p, reo_unit * pUnit );
+// ======================= reoUnits.c ======================================
+extern reo_unit * reoUnitsGetNextUnit(reo_man * p );
+extern void reoUnitsRecycleUnit( reo_man * p, reo_unit * pUnit );
+extern void reoUnitsRecycleUnitList( reo_man * p, reo_plane * pPlane );
+extern void reoUnitsAddUnitToPlane( reo_plane * pPlane, reo_unit * pUnit );
+extern void reoUnitsStopDispenser( reo_man * p );
+// ======================= reoTest.c =======================================
+extern void Extra_ReorderTest( DdManager * dd, DdNode * Func );
+extern DdNode * Extra_ReorderCudd( DdManager * dd, DdNode * aFunc, int pPermuteReo[] );
+extern int Extra_bddReorderTest( DdManager * dd, DdNode * bF );
+extern int Extra_addReorderTest( DdManager * dd, DdNode * aF );
+
+////////////////////////////////////////////////////////////////////////
+/// END OF FILE ///
+////////////////////////////////////////////////////////////////////////
+
+#endif
diff --git a/src/bdd/reo/reoApi.c b/src/bdd/reo/reoApi.c
new file mode 100644
index 00000000..e833dabd
--- /dev/null
+++ b/src/bdd/reo/reoApi.c
@@ -0,0 +1,289 @@
+/**CFile****************************************************************
+
+ FileName [reoApi.c]
+
+ PackageName [REO: A specialized DD reordering engine.]
+
+ Synopsis [Implementation of API functions.]
+
+ Author [Alan Mishchenko]
+
+ Affiliation [UC Berkeley]
+
+ Date [Ver. 1.0. Started - October 15, 2002.]
+
+ Revision [$Id: reoApi.c,v 1.0 2002/15/10 03:00:00 alanmi Exp $]
+
+***********************************************************************/
+
+#include "reo.h"
+
+////////////////////////////////////////////////////////////////////////
+/// DECLARATIONS ///
+////////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////////
+/// FUNCTION DEFINITIONS ///
+////////////////////////////////////////////////////////////////////////
+
+/**Function*************************************************************
+
+ Synopsis [Initializes the reordering engine.]
+
+ Description [The first argument is the max number of variables in the
+ CUDD DD manager which will be used with the reordering engine
+ (this number should be the maximum of the BDD and ZDD parts).
+ The second argument is the maximum number of BDD nodes in the BDDs
+ to be reordered. These limits are soft. Setting lower limits will later
+ cause the reordering manager to resize internal data structures.
+ However, setting the exact values will make reordering more efficient
+ because resizing will not be necessary.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+reo_man * Extra_ReorderInit( int nDdVarsMax, int nNodesMax )
+{
+ reo_man * p;
+ // allocate and clean the data structure
+ p = ALLOC( reo_man, 1 );
+ memset( p, 0, sizeof(reo_man) );
+ // resize the manager to meet user's needs
+ reoResizeStructures( p, nDdVarsMax, nNodesMax, 100 );
+ // set the defaults
+ p->fMinApl = 0;
+ p->fMinWidth = 0;
+ p->fRemapUp = 0;
+ p->fVerbose = 0;
+ p->fVerify = 0;
+ p->nIters = 1;
+ return p;
+}
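+
+/* A minimal lifecycle sketch (the variable count and the node estimate below
+   are assumptions made only for illustration; the limits are soft, as noted
+   in the description above):
+
+    DdManager * dd;
+    reo_man *   pReo;
+
+    dd   = Cudd_Init( 16, 0, CUDD_UNIQUE_SLOTS, CUDD_CACHE_SLOTS, 0 );
+    pReo = Extra_ReorderInit( 16, 1000 );    // up to 16 vars, about 1000 BDD nodes expected
+    // ... build BDDs in dd and reorder them with Extra_Reorder() ...
+    Extra_ReorderQuit( pReo );
+    Cudd_Quit( dd );
+*/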
+
+/**Function*************************************************************
+
+ Synopsis [Disposes of the reordering engine.]
+
+ Description [Removes all memory associated with the reordering engine.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void Extra_ReorderQuit( reo_man * p )
+{
+ free( p->pTops );
+ free( p->pSupp );
+ free( p->pOrderInt );
+ free( p->pWidthCofs );
+ free( p->pMapToPlanes );
+ free( p->pMapToDdVarsOrig );
+ free( p->pMapToDdVarsFinal );
+ free( p->pPlanes );
+ free( p->pVarCosts );
+ free( p->pLevelOrder );
+ free( p->HTable );
+ free( p->pRefNodes );
+ reoUnitsStopDispenser( p );
+ free( p->pMemChunks );
+ free( p );
+}
+
+/**Function*************************************************************
+
+ Synopsis [Sets the type of DD minimization that will be performed.]
+
+ Description [Currently, three different types of minimization are supported.
+ It is possible to minimize the number of BDD nodes. This is a classical type
+ of minimization, which is attempting to reduce the total number of nodes in
+ the (shared) BDD of the given Boolean functions. It is also possible to
+ minimize the BDD width, defined as the sum total of the number of cofactors
+ on each level in the (shared) BDD (note that the number of cofactors on the
+ given level may be larger than the number of nodes appearing on the given level).
+ It is also possible to minimize the average path length in the (shared) BDD
+ defined as the sum, over all BDD paths from the top node to any terminal node,
+ of the number of minterms on the path multiplied by the number of nodes on the
+ path. The default reordering type is minimization for the number of
+ BDD nodes. Calling this function with REO_MINIMIZE_WIDTH or REO_MINIMIZE_APL
+ as the second argument, changes the default minimization option for all the
+ reorder calls performed afterwards.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void Extra_ReorderSetMinimizationType( reo_man * p, reo_min_type fMinType )
+{
+ if ( fMinType == REO_MINIMIZE_NODES )
+ {
+ p->fMinWidth = 0;
+ p->fMinApl = 0;
+ }
+ else if ( fMinType == REO_MINIMIZE_WIDTH )
+ {
+ p->fMinWidth = 1;
+ p->fMinApl = 0;
+ }
+ else if ( fMinType == REO_MINIMIZE_APL )
+ {
+ p->fMinWidth = 0;
+ p->fMinApl = 1;
+ }
+ else
+ {
+ assert( 0 );
+ }
+}
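+
+// A short configuration sketch (pReo is assumed to be a manager returned by
+// Extra_ReorderInit(); choosing APL minimization here is only an example):
+//
+//    Extra_ReorderSetMinimizationType( pReo, REO_MINIMIZE_APL );
+//    ...
+//    Extra_ReorderSetMinimizationType( pReo, REO_MINIMIZE_NODES );  // restore the default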
+
+/**Function*************************************************************
+
+ Synopsis [Sets the type of remapping performed by the engine.]
+
+ Description [The remapping refers to the way the resulting BDD
+ is expressed using the elementary variables of the CUDD BDD manager.
+ Currently, two possibilities are supported: remapping and no
+ remapping. Remapping means that the function(s) after reordering
+ depend on the topmost variables in the manager. No remapping means
+ that the function(s) after reordering depend on the same variables
+ as before. Consider the following example. Suppose the initial four
+ variable function depends on variables 2,4,5, and 9 on the CUDD BDD
+ manager, which may be found anywhere in the current variable order.
+ If remapping is set, the function after ordering depends on the
+ topmost variables in the manager, which may or may not be the same
+ as the variables 2,4,5, and 9. If no remapping is set, then the
+ reordered function depends on the same variables 2,4,5, and 9, but
+ the meaning of each variable has changed according to the new ordering.
+ The resulting ordering is returned in the array "pOrder" filled out
+ by the reordering engine in the call to Extra_Reorder(). The default
+ is no remapping.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void Extra_ReorderSetRemapping( reo_man * p, int fRemapUp )
+{
+ p->fRemapUp = fRemapUp;
+}
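+
+// A short sketch of the two modes, for the example used in the description
+// above (a function depending on manager variables 2, 4, 5, and 9):
+//
+//    Extra_ReorderSetRemapping( pReo, REO_ENABLE );   // the result uses the topmost manager variables
+//    Extra_ReorderSetRemapping( pReo, REO_DISABLE );  // the result uses variables 2, 4, 5, and 9 (default)
+//
+// In both cases, the permutation written into pOrder by Extra_Reorder() maps
+// the variables of the resulting DD back to the original ones.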
+
+/**Function*************************************************************
+
+ Synopsis [Sets the number of iterations of sifting performed.]
+
+ Description [The default is one iteration. If a higher minimization
+ quality is desired, it is possible to set the number of iterations
+ to any number larger than 1. Convergence is often reached after
+ several iterations, so typically it makes no sense to set the number
+ of iterations higher than 3.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void Extra_ReorderSetIterations( reo_man * p, int nIters )
+{
+ p->nIters = nIters;
+}
+
+/**Function*************************************************************
+
+ Synopsis [Sets the verification mode.]
+
+ Description [Setting the level to 1 results in verifying the results
+ of variable reordering. Verification is performed by remapping the
+ resulting functions into the original variable order and comparing
+ them with the original functions given by the user. Enabling verification
+ typically leads to 20-30% increase in the total runtime of REO.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void Extra_ReorderSetVerification( reo_man * p, int fVerify )
+{
+ p->fVerify = fVerify;
+}
+
+/**Function*************************************************************
+
+ Synopsis [Sets the verbosity level.]
+
+ Description [Setting the level to 1 results in printing statistics
+ before and after the reordering.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void Extra_ReorderSetVerbosity( reo_man * p, int fVerbose )
+{
+ p->fVerbose = fVerbose;
+}
+
+/**Function*************************************************************
+
+ Synopsis [Performs reordering of the function.]
+
+ Description [Returns the DD minimized by variable reordering in the REO
+ engine. Takes the CUDD decision diagram manager (dd) and the function (Func)
+ represented as a BDD or ADD (MTBDD). If the variable array (pOrder) is not NULL,
+ returns the resulting variable permutation. The permutation is such that if the resulting
+ function is permuted by Cudd_(add,bdd)Permute() using pOrder as the permutation
+ array, the initial function (Func) results.
+ Several flags set by other interface functions specify reordering options:
+ - Remapping can be set by Extra_ReorderSetRemapping(). Then the resulting DD after
+ reordering is remapped into the topmost levels of the DD manager. Otherwise,
+ the resulting DD after reordering is mapped using the same variables, on which it
+ originally depended, only (possibly) permuted as a result of reordering.
+ - Minimization type can be set by Extra_ReorderSetMinimizationType(). Note
+ that when the BDD is minimized for the total width or the total APL, the number
+ of BDD nodes can increase. The total width is defined as the sum total of the widths
+ on each level. The width on one level is defined as the number of distinct BDD nodes
+ pointed to by the nodes situated above the given level.
+ - The number of iterations of sifting can be set by Extra_ReorderSetIterations().
+ The decision diagram returned by this procedure is not referenced.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+DdNode * Extra_Reorder( reo_man * p, DdManager * dd, DdNode * Func, int * pOrder )
+{
+ DdNode * FuncRes;
+ Extra_ReorderArray( p, dd, &Func, &FuncRes, 1, pOrder );
+ Cudd_Deref( FuncRes );
+ return FuncRes;
+}
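+
+/* A usage sketch based on the description above (dd, pReo, and bFunc are
+   assumed to exist already; pOrder is assumed to have one entry per manager
+   variable):
+
+    int      pOrder[16];
+    DdNode * bRes, * bBack;
+
+    bRes = Extra_Reorder( pReo, dd, bFunc, pOrder );
+    Cudd_Ref( bRes );                                 // the returned DD is not referenced
+
+    // optional check: permuting the result by pOrder restores the original function
+    bBack = Cudd_bddPermute( dd, bRes, pOrder );
+    Cudd_Ref( bBack );
+    assert( bBack == bFunc );
+    Cudd_RecursiveDeref( dd, bBack );
+*/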
+
+/**Function*************************************************************
+
+ Synopsis [Performs reordering of the array of functions.]
+
+ Description [The options are similar to the procedure Extra_Reorder(), except that
+ the user should also provide storage for the resulting DDs, which are returned
+ referenced.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void Extra_ReorderArray( reo_man * p, DdManager * dd, DdNode * Funcs[], DdNode * FuncsRes[], int nFuncs, int * pOrder )
+{
+ reoReorderArray( p, dd, Funcs, FuncsRes, nFuncs, pOrder );
+}
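+
+// A short sketch for the array version (bF0 and bF1 are assumed BDDs in dd;
+// unlike Extra_Reorder(), the results come back already referenced):
+//
+//    DdNode * pFuncs[2] = { bF0, bF1 };
+//    DdNode * pFuncsRes[2];
+//    Extra_ReorderArray( pReo, dd, pFuncs, pFuncsRes, 2, NULL );  // NULL: permutation not requested
+//    ...
+//    Cudd_RecursiveDeref( dd, pFuncsRes[0] );
+//    Cudd_RecursiveDeref( dd, pFuncsRes[1] );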
+
+
+////////////////////////////////////////////////////////////////////////
+/// END OF FILE ///
+////////////////////////////////////////////////////////////////////////
+
diff --git a/src/bdd/reo/reoCore.c b/src/bdd/reo/reoCore.c
new file mode 100644
index 00000000..3782631c
--- /dev/null
+++ b/src/bdd/reo/reoCore.c
@@ -0,0 +1,438 @@
+/**CFile****************************************************************
+
+ FileName [reoCore.c]
+
+ PackageName [REO: A specialized DD reordering engine.]
+
+ Synopsis [Implementation of the core reordering procedure.]
+
+ Author [Alan Mishchenko]
+
+ Affiliation [UC Berkeley]
+
+ Date [Ver. 1.0. Started - October 15, 2002.]
+
+ Revision [$Id: reoCore.c,v 1.0 2002/15/10 03:00:00 alanmi Exp $]
+
+***********************************************************************/
+
+#include "reo.h"
+
+////////////////////////////////////////////////////////////////////////
+/// DECLARATIONS ///
+////////////////////////////////////////////////////////////////////////
+
+#define CALLOC(type, num) ((type *) calloc((long)(num), (long)sizeof(type)))
+
+static int reoRecursiveDeref( reo_unit * pUnit );
+static int reoCheckZeroRefs( reo_plane * pPlane );
+static int reoCheckLevels( reo_man * p );
+
+double s_AplBefore;
+double s_AplAfter;
+
+////////////////////////////////////////////////////////////////////////
+/// FUNCTION DEFINITIONS ///
+////////////////////////////////////////////////////////////////////////
+
+/**Function*************************************************************
+
+ Synopsis []
+
+ Description []
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void reoReorderArray( reo_man * p, DdManager * dd, DdNode * Funcs[], DdNode * FuncsRes[], int nFuncs, int * pOrder )
+{
+ int Counter, i;
+
+ // set the initial parameters
+ p->dd = dd;
+ p->pOrder = pOrder;
+ p->nTops = nFuncs;
+ // get the initial number of nodes
+ p->nNodesBeg = Cudd_SharingSize( Funcs, nFuncs );
+ // resize the internal data structures of the manager if necessary
+ reoResizeStructures( p, ddMax(dd->size,dd->sizeZ), p->nNodesBeg, nFuncs );
+ // compute the support
+ p->pSupp = Extra_VectorSupportArray( dd, Funcs, nFuncs, p->pSupp );
+ // get the number of support variables
+ p->nSupp = 0;
+ for ( i = 0; i < dd->size; i++ )
+ p->nSupp += p->pSupp[i];
+
+ // if it is the constant function, no need to reorder
+ if ( p->nSupp == 0 )
+ {
+ for ( i = 0; i < nFuncs; i++ )
+ {
+ FuncsRes[i] = Funcs[i]; Cudd_Ref( FuncsRes[i] );
+ }
+ return;
+ }
+
+ // create the internal variable maps
+ // go through variable levels in the manager
+ Counter = 0;
+ for ( i = 0; i < dd->size; i++ )
+ if ( p->pSupp[ dd->invperm[i] ] )
+ {
+ p->pMapToPlanes[ dd->invperm[i] ] = Counter;
+ p->pMapToDdVarsOrig[Counter] = dd->invperm[i];
+ if ( !p->fRemapUp )
+ p->pMapToDdVarsFinal[Counter] = dd->invperm[i];
+ else
+ p->pMapToDdVarsFinal[Counter] = dd->invperm[Counter];
+ p->pOrderInt[Counter] = Counter;
+ Counter++;
+ }
+
+ // set the initial parameters
+ p->nUnitsUsed = 0;
+ p->nNodesCur = 0;
+ p->fThisIsAdd = 0;
+ p->Signature++;
+ // transfer the function from the CUDD package into REO's internal data structure
+ for ( i = 0; i < nFuncs; i++ )
+ p->pTops[i] = reoTransferNodesToUnits_rec( p, Funcs[i] );
+ assert( p->nNodesBeg == p->nNodesCur );
+
+ if ( !p->fThisIsAdd && p->fMinWidth )
+ {
+ printf( "An important message from the REO reordering engine:\n" );
+ printf( "The BDD given to the engine for reordering contains complemented edges.\n" );
+ printf( "Currently, such BDDs cannot be reordered for the minimum width.\n" );
+ printf( "Therefore, minimization for the number of BDD nodes is performed.\n" );
+ fflush( stdout );
+ p->fMinApl = 0;
+ p->fMinWidth = 0;
+ }
+
+ if ( p->fMinWidth )
+ reoProfileWidthStart(p);
+ else if ( p->fMinApl )
+ reoProfileAplStart(p);
+ else
+ reoProfileNodesStart(p);
+
+ if ( p->fVerbose )
+ {
+ printf( "INITIAL: " );
+ if ( p->fMinWidth )
+ reoProfileWidthPrint(p);
+ else if ( p->fMinApl )
+ reoProfileAplPrint(p);
+ else
+ reoProfileNodesPrint(p);
+ }
+
+ ///////////////////////////////////////////////////////////////////
+ // performs the reordering
+ p->nSwaps = 0;
+ p->nNISwaps = 0;
+ for ( i = 0; i < p->nIters; i++ )
+ {
+ reoReorderSift( p );
+ // print statistics after each iteration
+ if ( p->fVerbose )
+ {
+ printf( "ITER #%d: ", i+1 );
+ if ( p->fMinWidth )
+ reoProfileWidthPrint(p);
+ else if ( p->fMinApl )
+ reoProfileAplPrint(p);
+ else
+ reoProfileNodesPrint(p);
+ }
+ // if the cost function did not change, stop iterating
+ if ( p->fMinWidth )
+ {
+ p->nWidthEnd = p->nWidthCur;
+ assert( p->nWidthEnd <= p->nWidthBeg );
+ if ( p->nWidthEnd == p->nWidthBeg )
+ break;
+ }
+ else if ( p->fMinApl )
+ {
+ p->nAplEnd = p->nAplCur;
+ assert( p->nAplEnd <= p->nAplBeg );
+ if ( p->nAplEnd == p->nAplBeg )
+ break;
+ }
+ else
+ {
+ p->nNodesEnd = p->nNodesCur;
+ assert( p->nNodesEnd <= p->nNodesBeg );
+ if ( p->nNodesEnd == p->nNodesBeg )
+ break;
+ }
+ }
+ assert( reoCheckLevels( p ) );
+ ///////////////////////////////////////////////////////////////////
+
+s_AplBefore = p->nAplBeg;
+s_AplAfter = p->nAplEnd;
+
+ // set the initial parameters
+ p->nRefNodes = 0;
+ p->nNodesCur = 0;
+ p->Signature++;
+ // transfer the BDDs from REO's internal data structure to CUDD
+ for ( i = 0; i < nFuncs; i++ )
+ {
+ FuncsRes[i] = reoTransferUnitsToNodes_rec( p, p->pTops[i] ); Cudd_Ref( FuncsRes[i] );
+ }
+ // undo the DDs referenced for storing in the cache
+ for ( i = 0; i < p->nRefNodes; i++ )
+ Cudd_RecursiveDeref( dd, p->pRefNodes[i] );
+ // verify zero refs of the terminal nodes
+ for ( i = 0; i < nFuncs; i++ )
+ {
+ assert( reoRecursiveDeref( p->pTops[i] ) );
+ }
+ assert( reoCheckZeroRefs( &(p->pPlanes[p->nSupp]) ) );
+
+ // prepare the variable map to return to the user
+ if ( p->pOrder )
+ {
+ // i is the current level in the planes data structure
+ // p->pOrderInt[i] is the original level in the planes data structure
+ // p->pMapToDdVarsOrig[i] is the variable, into which we remap when we construct the BDD from planes
+ // p->pMapToDdVarsOrig[ p->pOrderInt[i] ] is the original BDD variable corresponding to this level
+ // Therefore, p->pOrder[ p->pMapToDdVarsFinal[i] ] = p->pMapToDdVarsOrig[ p->pOrderInt[i] ]
+ // creates the permutation, which remaps the resulting BDD variable into the original BDD variable
+ for ( i = 0; i < p->nSupp; i++ )
+ p->pOrder[ p->pMapToDdVarsFinal[i] ] = p->pMapToDdVarsOrig[ p->pOrderInt[i] ];
+ }
+
+ if ( p->fVerify )
+ {
+ int fVerification;
+ DdNode * FuncRemapped;
+ int * pOrder;
+
+ if ( p->pOrder == NULL )
+ {
+ pOrder = ALLOC( int, p->nSupp );
+ for ( i = 0; i < p->nSupp; i++ )
+ pOrder[ p->pMapToDdVarsFinal[i] ] = p->pMapToDdVarsOrig[ p->pOrderInt[i] ];
+ }
+ else
+ pOrder = p->pOrder;
+
+ fVerification = 1;
+ for ( i = 0; i < nFuncs; i++ )
+ {
+ // verify the result
+ if ( p->fThisIsAdd )
+ FuncRemapped = Cudd_addPermute( dd, FuncsRes[i], pOrder );
+ else
+ FuncRemapped = Cudd_bddPermute( dd, FuncsRes[i], pOrder );
+ Cudd_Ref( FuncRemapped );
+
+ if ( FuncRemapped != Funcs[i] )
+ {
+ fVerification = 0;
+ printf( "REO: Internal verification has failed!\n" );
+ fflush( stdout );
+ }
+ Cudd_RecursiveDeref( dd, FuncRemapped );
+ }
+ if ( fVerification )
+ printf( "REO: Internal verification is okay!\n" );
+
+ if ( p->pOrder == NULL )
+ free( pOrder );
+ }
+
+ // recycle the data structure
+ for ( i = 0; i <= p->nSupp; i++ )
+ reoUnitsRecycleUnitList( p, p->pPlanes + i );
+}
+
+/**Function*************************************************************
+
+ Synopsis [Resizes the internal manager data structures.]
+
+ Description []
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void reoResizeStructures( reo_man * p, int nDdVarsMax, int nNodesMax, int nFuncs )
+{
+ // resize data structures depending on the number of variables in the DD manager
+ if ( p->nSuppAlloc == 0 )
+ {
+ p->pSupp = ALLOC( int, nDdVarsMax + 1 );
+ p->pOrderInt = ALLOC( int, nDdVarsMax + 1 );
+ p->pMapToPlanes = ALLOC( int, nDdVarsMax + 1 );
+ p->pMapToDdVarsOrig = ALLOC( int, nDdVarsMax + 1 );
+ p->pMapToDdVarsFinal = ALLOC( int, nDdVarsMax + 1 );
+ p->pPlanes = CALLOC( reo_plane, nDdVarsMax + 1 );
+ p->pVarCosts = ALLOC( double, nDdVarsMax + 1 );
+ p->pLevelOrder = ALLOC( int, nDdVarsMax + 1 );
+ p->nSuppAlloc = nDdVarsMax + 1;
+ }
+ else if ( p->nSuppAlloc < nDdVarsMax )
+ {
+ free( p->pSupp );
+ free( p->pOrderInt );
+ free( p->pMapToPlanes );
+ free( p->pMapToDdVarsOrig );
+ free( p->pMapToDdVarsFinal );
+ free( p->pPlanes );
+ free( p->pVarCosts );
+ free( p->pLevelOrder );
+
+ p->pSupp = ALLOC( int, nDdVarsMax + 1 );
+ p->pOrderInt = ALLOC( int, nDdVarsMax + 1 );
+ p->pMapToPlanes = ALLOC( int, nDdVarsMax + 1 );
+ p->pMapToDdVarsOrig = ALLOC( int, nDdVarsMax + 1 );
+ p->pMapToDdVarsFinal = ALLOC( int, nDdVarsMax + 1 );
+ p->pPlanes = CALLOC( reo_plane, nDdVarsMax + 1 );
+ p->pVarCosts = ALLOC( double, nDdVarsMax + 1 );
+ p->pLevelOrder = ALLOC( int, nDdVarsMax + 1 );
+ p->nSuppAlloc = nDdVarsMax + 1;
+ }
+
+ // resize the data structures depending on the number of nodes
+ if ( p->nRefNodesAlloc == 0 )
+ {
+ p->nNodesMaxAlloc = nNodesMax;
+ p->nTableSize = 3*nNodesMax + 1;
+ p->nRefNodesAlloc = 3*nNodesMax + 1;
+ p->nMemChunksAlloc = (10*nNodesMax + 1)/REO_CHUNK_SIZE + 1;
+
+ p->HTable = CALLOC( reo_hash, p->nTableSize );
+ p->pRefNodes = ALLOC( DdNode *, p->nRefNodesAlloc );
+ p->pWidthCofs = ALLOC( reo_unit *, p->nRefNodesAlloc );
+ p->pMemChunks = ALLOC( reo_unit *, p->nMemChunksAlloc );
+ }
+ else if ( p->nNodesMaxAlloc < nNodesMax )
+ {
+ void * pTemp;
+ int nMemChunksAllocPrev = p->nMemChunksAlloc;
+
+ p->nNodesMaxAlloc = nNodesMax;
+ p->nTableSize = 3*nNodesMax + 1;
+ p->nRefNodesAlloc = 3*nNodesMax + 1;
+ p->nMemChunksAlloc = (10*nNodesMax + 1)/REO_CHUNK_SIZE + 1;
+
+ free( p->HTable );
+ free( p->pRefNodes );
+ free( p->pWidthCofs );
+ p->HTable = CALLOC( reo_hash, p->nTableSize );
+ p->pRefNodes = ALLOC( DdNode *, p->nRefNodesAlloc );
+ p->pWidthCofs = ALLOC( reo_unit *, p->nRefNodesAlloc );
+ // p->pMemChunks should be reallocated because it contains pointers currently in use
+ pTemp = ALLOC( reo_unit *, p->nMemChunksAlloc );
+ memmove( pTemp, p->pMemChunks, sizeof(reo_unit *) * nMemChunksAllocPrev );
+ free( p->pMemChunks );
+ p->pMemChunks = pTemp;
+ }
+
+ // resize the data structures depending on the number of functions
+ if ( p->nTopsAlloc == 0 )
+ {
+ p->pTops = ALLOC( reo_unit *, nFuncs );
+ p->nTopsAlloc = nFuncs;
+ }
+ else if ( p->nTopsAlloc < nFuncs )
+ {
+ free( p->pTops );
+ p->pTops = ALLOC( reo_unit *, nFuncs );
+ p->nTopsAlloc = nFuncs;
+ }
+}
+
+
+/**Function*************************************************************
+
+ Synopsis [Dereferences units in the data structure after reordering.]
+
+ Description [This function is only useful for debugging.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+int reoRecursiveDeref( reo_unit * pUnit )
+{
+ reo_unit * pUnitR;
+ pUnitR = Unit_Regular(pUnit);
+ pUnitR->n--;
+ if ( Unit_IsConstant(pUnitR) )
+ return 1;
+ if ( pUnitR->n == 0 )
+ {
+ reoRecursiveDeref( pUnitR->pE );
+ reoRecursiveDeref( pUnitR->pT );
+ }
+ return 1;
+}
+
+/**Function*************************************************************
+
+ Synopsis [Checks the zero references for the given plane.]
+
+ Description [This function is only useful for debugging.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+int reoCheckZeroRefs( reo_plane * pPlane )
+{
+ reo_unit * pUnit;
+ for ( pUnit = pPlane->pHead; pUnit; pUnit = pUnit->Next )
+ {
+ if ( pUnit->n != 0 )
+ {
+ assert( 0 );
+ }
+ }
+ return 1;
+}
+
+/**Function*************************************************************
+
+ Synopsis [Checks that the level of each unit matches the plane it is stored on.]
+
+ Description [This function is only useful for debugging.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+int reoCheckLevels( reo_man * p )
+{
+ reo_unit * pUnit;
+ int i;
+
+ for ( i = 0; i < p->nSupp; i++ )
+ {
+ // there are some nodes left on each level
+ assert( p->pPlanes[i].statsNodes );
+ for ( pUnit = p->pPlanes[i].pHead; pUnit; pUnit = pUnit->Next )
+ {
+ // the level is properly set
+ assert( pUnit->lev == i );
+ }
+ }
+ return 1;
+}
+
+////////////////////////////////////////////////////////////////////////
+/// END OF FILE ///
+////////////////////////////////////////////////////////////////////////
+
diff --git a/src/bdd/reo/reoProfile.c b/src/bdd/reo/reoProfile.c
new file mode 100644
index 00000000..b38575f0
--- /dev/null
+++ b/src/bdd/reo/reoProfile.c
@@ -0,0 +1,365 @@
+/**CFile****************************************************************
+
+ FileName [reoProfile.c]
+
+ PackageName [REO: A specialized DD reordering engine.]
+
+ Synopsis [Procedures that compute variable profiles (nodes, width, APL).]
+
+ Author [Alan Mishchenko]
+
+ Affiliation [UC Berkeley]
+
+ Date [Ver. 1.0. Started - October 15, 2002.]
+
+ Revision [$Id: reoProfile.c,v 1.0 2002/15/10 03:00:00 alanmi Exp $]
+
+***********************************************************************/
+
+#include "reo.h"
+
+////////////////////////////////////////////////////////////////////////
+/// DECLARATIONS ///
+////////////////////////////////////////////////////////////////////////
+
+
+////////////////////////////////////////////////////////////////////////
+/// FUNCTION DEFINITIONS ///
+////////////////////////////////////////////////////////////////////////
+
+
+/**Function********************************************************************
+
+ Synopsis [Start the profile for the BDD nodes.]
+
+ Description [TopRef is the first level, on which the given node counts towards
+ the width of the BDDs. (In other words, it is the level of the referencing node plus 1.)]
+
+ SideEffects []
+
+ SeeAlso []
+
+******************************************************************************/
+void reoProfileNodesStart( reo_man * p )
+{
+ int Total, i;
+ Total = 0;
+ for ( i = 0; i <= p->nSupp; i++ )
+ {
+ p->pPlanes[i].statsCost = p->pPlanes[i].statsNodes;
+ Total += p->pPlanes[i].statsNodes;
+ }
+ assert( Total == p->nNodesCur );
+ p->nNodesBeg = p->nNodesCur;
+}
+
+/**Function*************************************************************
+
+ Synopsis [Start the profile for the APL.]
+
+ Description [Computes the total path length. The path length is normalized
+ by dividing it by 2^|supp(f)|. To get the "real" APL, multiply by 2^|supp(f)|.
+ This procedure assumes that the Weight field of all nodes has been set to 0.0
+ before the call, except for the weight of the topmost node, which is set to 1.0
+ (1.0 is the probability of traversing the topmost node). This procedure
+ assigns the edge weights. Because of the equal probability of selecting 0 and 1
+ assignment at a node, the edge weights are the same for the node.
+ Instead of storing them, we store the weight of the node, which is the probability
+ of traversing the node (pUnit->Weight) during the top-down evaluation of the BDD. ]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void reoProfileAplStart( reo_man * p )
+{
+ reo_unit * pER, * pTR;
+ reo_unit * pUnit;
+ double Res, Half;
+ int i;
+
+ // clean the weights of all nodes
+ for ( i = 0; i < p->nSupp; i++ )
+ for ( pUnit = p->pPlanes[i].pHead; pUnit; pUnit = pUnit->Next )
+ pUnit->Weight = 0.0;
+ // to assign the node weights (the probability of visiting each node)
+ // we visit the node after visiting its predecessors
+
+ // set the probability of visits to the top nodes
+ for ( i = 0; i < p->nTops; i++ )
+ Unit_Regular(p->pTops[i])->Weight += 1.0;
+
+ // to compute the path length (the sum of products of edge weight by edge length)
+ // we visit the nodes in any order (the above order will do)
+ Res = 0.0;
+ for ( i = 0; i < p->nSupp; i++ )
+ {
+ p->pPlanes[i].statsCost = 0.0;
+ for ( pUnit = p->pPlanes[i].pHead; pUnit; pUnit = pUnit->Next )
+ {
+ pER = Unit_Regular(pUnit->pE);
+ pTR = Unit_Regular(pUnit->pT);
+ Half = 0.5 * pUnit->Weight;
+ pER->Weight += Half;
+ pTR->Weight += Half;
+ // add to the path length
+ p->pPlanes[i].statsCost += pUnit->Weight;
+ }
+ Res += p->pPlanes[i].statsCost;
+ }
+ p->pPlanes[p->nSupp].statsCost = 0.0;
+ p->nAplBeg = p->nAplCur = Res;
+}
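+
+// A small worked example of the normalized APL computed above (for an assumed
+// two-variable function f = x0*x1 with x0 on top): the top node is visited with
+// probability 1.0 and passes 0.5 to each cofactor, so the x1 node is visited
+// with probability 0.5; the normalized APL is therefore 1.0 + 0.5 = 1.5, and
+// the "real" APL is 1.5 * 2^2 = 6.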
+
+/**Function********************************************************************
+
+ Synopsis [Start the profile for the BDD width. Complexity of the algorithm is O(N + n).]
+
+ Description [TopRef is the first level, on which the given node counts towards
+ the width of the BDDs. (In other words, it is the level of the referencing node plus 1.)]
+
+ SideEffects []
+
+ SeeAlso []
+
+******************************************************************************/
+void reoProfileWidthStart( reo_man * p )
+{
+ reo_unit * pUnit;
+ int * pWidthStart;
+ int * pWidthStop;
+ int v;
+
+ // allocate and clean the storage for starting and stopping levels
+ pWidthStart = ALLOC( int, p->nSupp + 1 );
+ pWidthStop = ALLOC( int, p->nSupp + 1 );
+ memset( pWidthStart, 0, sizeof(int) * (p->nSupp + 1) );
+ memset( pWidthStop, 0, sizeof(int) * (p->nSupp + 1) );
+
+ // go through the non-constant nodes and set the topmost level of their cofactors
+ for ( v = 0; v <= p->nSupp; v++ )
+ for ( pUnit = p->pPlanes[v].pHead; pUnit; pUnit = pUnit->Next )
+ {
+ pUnit->TopRef = REO_TOPREF_UNDEF;
+ pUnit->Sign = 0;
+ }
+
+ // add the topmost level of the width profile
+ for ( v = 0; v < p->nTops; v++ )
+ {
+ pUnit = Unit_Regular(p->pTops[v]);
+ if ( pUnit->TopRef == REO_TOPREF_UNDEF )
+ {
+ // set the starting level
+ pUnit->TopRef = 0;
+ pWidthStart[pUnit->TopRef]++;
+ // set the stopping level
+ if ( pUnit->lev != REO_CONST_LEVEL )
+ pWidthStop[pUnit->lev+1]++;
+ }
+ }
+
+ for ( v = 0; v < p->nSupp; v++ )
+ for ( pUnit = p->pPlanes[v].pHead; pUnit; pUnit = pUnit->Next )
+ {
+ if ( pUnit->pE->TopRef == REO_TOPREF_UNDEF )
+ {
+ // set the starting level
+ pUnit->pE->TopRef = pUnit->lev + 1;
+ pWidthStart[pUnit->pE->TopRef]++;
+ // set the stopping level
+ if ( pUnit->pE->lev != REO_CONST_LEVEL )
+ pWidthStop[pUnit->pE->lev+1]++;
+ }
+ if ( pUnit->pT->TopRef == REO_TOPREF_UNDEF )
+ {
+ // set the starting level
+ pUnit->pT->TopRef = pUnit->lev + 1;
+ pWidthStart[pUnit->pT->TopRef]++;
+ // set the stopping level
+ if ( pUnit->pT->lev != REO_CONST_LEVEL )
+ pWidthStop[pUnit->pT->lev+1]++;
+ }
+ }
+
+ // verify the top reference
+ for ( v = 0; v < p->nSupp; v++ )
+ reoProfileWidthVerifyLevel( p->pPlanes + v, v );
+
+ // derive the profile
+ p->nWidthCur = 0;
+ for ( v = 0; v <= p->nSupp; v++ )
+ {
+ if ( v == 0 )
+ p->pPlanes[v].statsWidth = pWidthStart[v] - pWidthStop[v];
+ else
+ p->pPlanes[v].statsWidth = p->pPlanes[v-1].statsWidth + pWidthStart[v] - pWidthStop[v];
+ p->pPlanes[v].statsCost = p->pPlanes[v].statsWidth;
+ p->nWidthCur += p->pPlanes[v].statsWidth;
+// printf( "Level %2d: Width = %5d. Correct = %d.\n", v, Temp, p->pPlanes[v].statsWidth );
+ }
+ p->nWidthBeg = p->nWidthCur;
+ free( pWidthStart );
+ free( pWidthStop );
+}
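+
+// A small worked example of the start/stop counting above (for an assumed
+// two-variable function f = x0*x1, viewed as an ADD so that both constants are
+// present and there are no complemented edges): the x0 node starts at level 0
+// and stops after level 0; the x1 node and the constant 0 (the else-cofactor
+// of x0) start at level 1; the constant 1 starts at level 2; constants never
+// stop. The running sums give widths 1, 2, 2 on levels 0, 1, 2, for a total
+// width of 5.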
+
+/**Function********************************************************************
+
+ Synopsis [Start the profile for the BDD width. Complexity of the algorithm is O(N * n).]
+
+ Description [TopRef is the first level, on which the given node counts towards
+ the width of the BDDs. (In other words, it is the level of the referencing node plus 1.)]
+
+ SideEffects []
+
+ SeeAlso []
+
+******************************************************************************/
+void reoProfileWidthStart2( reo_man * p )
+{
+ reo_unit * pUnit;
+ int i, v;
+
+ // clean the profile
+ for ( i = 0; i <= p->nSupp; i++ )
+ p->pPlanes[i].statsWidth = 0;
+
+ // clean the node structures
+ for ( v = 0; v <= p->nSupp; v++ )
+ for ( pUnit = p->pPlanes[v].pHead; pUnit; pUnit = pUnit->Next )
+ {
+ pUnit->TopRef = REO_TOPREF_UNDEF;
+ pUnit->Sign = 0;
+ }
+
+ // set the topref to the topmost nodes
+ for ( i = 0; i < p->nTops; i++ )
+ Unit_Regular(p->pTops[i])->TopRef = 0;
+
+ // go through the non-constant nodes and set the topmost level of their cofactors
+ for ( i = 0; i < p->nSupp; i++ )
+ for ( pUnit = p->pPlanes[i].pHead; pUnit; pUnit = pUnit->Next )
+ {
+ if ( pUnit->pE->TopRef > i+1 )
+ pUnit->pE->TopRef = i+1;
+ if ( pUnit->pT->TopRef > i+1 )
+ pUnit->pT->TopRef = i+1;
+ }
+
+ // verify the top reference
+ for ( i = 0; i < p->nSupp; i++ )
+ reoProfileWidthVerifyLevel( p->pPlanes + i, i );
+
+ // compute the profile for the internal nodes
+ for ( i = 0; i < p->nSupp; i++ )
+ for ( pUnit = p->pPlanes[i].pHead; pUnit; pUnit = pUnit->Next )
+ for ( v = pUnit->TopRef; v <= pUnit->lev; v++ )
+ p->pPlanes[v].statsWidth++;
+
+ // compute the profile for the constant nodes
+ for ( pUnit = p->pPlanes[p->nSupp].pHead; pUnit; pUnit = pUnit->Next )
+ for ( v = pUnit->TopRef; v <= p->nSupp; v++ )
+ p->pPlanes[v].statsWidth++;
+
+ // get the width cost
+ p->nWidthCur = 0;
+ for ( i = 0; i <= p->nSupp; i++ )
+ {
+ p->pPlanes[i].statsCost = p->pPlanes[i].statsWidth;
+ p->nWidthCur += p->pPlanes[i].statsWidth;
+ }
+ p->nWidthBeg = p->nWidthCur;
+}
+
+/**Function********************************************************************
+
+ Synopsis []
+
+ Description []
+
+ SideEffects []
+
+ SeeAlso []
+
+******************************************************************************/
+void reoProfileNodesPrint( reo_man * p )
+{
+ printf( "NODES: Total = %6d. Average = %6.2f.\n", p->nNodesCur, p->nNodesCur / (float)p->nSupp );
+}
+
+/**Function********************************************************************
+
+ Synopsis []
+
+ Description []
+
+ SideEffects []
+
+ SeeAlso []
+
+******************************************************************************/
+void reoProfileAplPrint( reo_man * p )
+{
+ printf( "APL: Total = %8.2f. Average =%6.2f.\n", p->nAplCur, p->nAplCur / (float)p->nSupp );
+}
+
+/**Function********************************************************************
+
+ Synopsis []
+
+ Description []
+
+ SideEffects []
+
+ SeeAlso []
+
+******************************************************************************/
+void reoProfileWidthPrint( reo_man * p )
+{
+ int WidthMax;
+ int TotalWidth;
+ int i;
+
+ WidthMax = 0;
+ TotalWidth = 0;
+ for ( i = 0; i <= p->nSupp; i++ )
+ {
+// printf( "Level = %2d. Width = %3d.\n", i, p->pProfile[i] );
+ if ( WidthMax < p->pPlanes[i].statsWidth )
+ WidthMax = p->pPlanes[i].statsWidth;
+ TotalWidth += p->pPlanes[i].statsWidth;
+ }
+ assert( p->nWidthCur == TotalWidth );
+ printf( "WIDTH: " );
+ printf( "Maximum = %5d. ", WidthMax );
+ printf( "Total = %7d. ", p->nWidthCur );
+ printf( "Average = %6.2f.\n", TotalWidth / (float)p->nSupp );
+}
+
+/**Function********************************************************************
+
+ Synopsis []
+
+ Description []
+
+ SideEffects []
+
+ SeeAlso []
+
+******************************************************************************/
+void reoProfileWidthVerifyLevel( reo_plane * pPlane, int Level )
+{
+ reo_unit * pUnit;
+ for ( pUnit = pPlane->pHead; pUnit; pUnit = pUnit->Next )
+ {
+ assert( pUnit->TopRef <= Level );
+ assert( pUnit->pE->TopRef <= Level + 1 );
+ assert( pUnit->pT->TopRef <= Level + 1 );
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+/// END OF FILE ///
+////////////////////////////////////////////////////////////////////////
+
diff --git a/src/bdd/reo/reoSift.c b/src/bdd/reo/reoSift.c
new file mode 100644
index 00000000..93d82f08
--- /dev/null
+++ b/src/bdd/reo/reoSift.c
@@ -0,0 +1,341 @@
+/**CFile****************************************************************
+
+ FileName [reoSift.c]
+
+ PackageName [REO: A specialized DD reordering engine.]
+
+ Synopsis [Implementation of the sifting algorithm.]
+
+ Author [Alan Mishchenko]
+
+ Affiliation [UC Berkeley]
+
+ Date [Ver. 1.0. Started - October 15, 2002.]
+
+ Revision [$Id: reoSift.c,v 1.0 2002/15/10 03:00:00 alanmi Exp $]
+
+***********************************************************************/
+
+#include "reo.h"
+
+////////////////////////////////////////////////////////////////////////
+/// DECLARATIONS ///
+////////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////////
+/// FUNCTION DEFINITIONS ///
+////////////////////////////////////////////////////////////////////////
+
+/**Function*************************************************************
+
+ Synopsis [Implements the variable sifting algorithm.]
+
+ Description [Performs a sequence of adjacent variable swaps known as "sifting".
+ Uses the cost functions determined by the flag.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void reoReorderSift( reo_man * p )
+{
+ double CostCurrent; // the cost of the current permutation
+ double CostLimit; // the maximum increase in cost that can be tolerated
+ double CostBest; // the best cost
+ int BestQ; // the best position
+ int VarCurrent; // the current variable to move
+ int q; // denotes the current position of the variable
+ int c; // performs the loops over variables until all of them are sifted
+ int v; // used for other purposes
+
+ assert( p->nSupp > 0 );
+
+ // set the current cost depending on the minimization criteria
+ if ( p->fMinWidth )
+ CostCurrent = p->nWidthCur;
+ else if ( p->fMinApl )
+ CostCurrent = p->nAplCur;
+ else
+ CostCurrent = p->nNodesCur;
+
+ // find the upper bound on the cost growth
+ CostLimit = 1 + (int)(REO_REORDER_LIMIT * CostCurrent);
+
+ // perform sifting for each of p->nSupp variables
+ for ( c = 0; c < p->nSupp; c++ )
+ {
+ // select the current variable to be the one with the largest number of nodes that is not sifted yet
+ VarCurrent = -1;
+ CostBest = -1.0;
+ for ( v = 0; v < p->nSupp; v++ )
+ {
+ p->pVarCosts[v] = REO_HIGH_VALUE;
+ if ( !p->pPlanes[v].fSifted )
+ {
+// VarCurrent = v;
+// if ( CostBest < p->pPlanes[v].statsCost )
+ if ( CostBest < p->pPlanes[v].statsNodes )
+ {
+// CostBest = p->pPlanes[v].statsCost;
+ CostBest = p->pPlanes[v].statsNodes;
+ VarCurrent = v;
+ }
+
+ }
+ }
+ assert( VarCurrent != -1 );
+ // mark this variable as sifted
+ p->pPlanes[VarCurrent].fSifted = 1;
+
+ // set the current value
+ p->pVarCosts[VarCurrent] = CostCurrent;
+
+ // set the best cost
+ CostBest = CostCurrent;
+ BestQ = VarCurrent;
+
+ // determine which way to move the variable first (up or down)
+ // the rationale is that if we move the shorter way first
+ // it is more likely that the best position will be found on the longer way
+ // and the reverse movement (to take the best position) will be faster
+ if ( VarCurrent < p->nSupp/2 ) // move up first, then down
+ {
+ // set the total cost on all levels above the current level
+ p->pPlanes[0].statsCostAbove = 0;
+ for ( v = 1; v <= VarCurrent; v++ )
+ p->pPlanes[v].statsCostAbove = p->pPlanes[v-1].statsCostAbove + p->pPlanes[v-1].statsCost;
+ // set the total cost on all levels below the current level
+ p->pPlanes[p->nSupp].statsCostBelow = 0;
+ for ( v = p->nSupp - 1; v >= VarCurrent; v-- )
+ p->pPlanes[v].statsCostBelow = p->pPlanes[v+1].statsCostBelow + p->pPlanes[v+1].statsCost;
+
+ assert( CostCurrent == p->pPlanes[VarCurrent].statsCostAbove +
+ p->pPlanes[VarCurrent].statsCost +
+ p->pPlanes[VarCurrent].statsCostBelow );
+
+ // move up
+ for ( q = VarCurrent-1; q >= 0; q-- )
+ {
+ CostCurrent -= reoReorderSwapAdjacentVars( p, q, 1 );
+ // now q points to the position of this var in the order
+ p->pVarCosts[q] = CostCurrent;
+ // update the lower bound (assuming that for level q+1 it is set correctly)
+ p->pPlanes[q].statsCostBelow = p->pPlanes[q+1].statsCostBelow + p->pPlanes[q+1].statsCost;
+ // check the upper bound
+ if ( CostCurrent >= CostLimit )
+ break;
+ // check the lower bound
+ if ( p->pPlanes[q].statsCostBelow + (REO_QUAL_PAR-1)*p->pPlanes[q].statsCostAbove/REO_QUAL_PAR >= CostBest )
+ break;
+ // update the best cost
+ if ( CostBest > CostCurrent )
+ {
+ CostBest = CostCurrent;
+ BestQ = q;
+ // adjust node limit
+ CostLimit = ddMin( CostLimit, 1 + (int)(REO_REORDER_LIMIT * CostCurrent) );
+ }
+
+ // when we are reordering for width or APL, it may happen that
+ // the number of nodes has grown above certain limit,
+ // in which case we have to resize the data structures
+ if ( p->fMinWidth || p->fMinApl )
+ {
+ if ( p->nNodesCur >= 2 * p->nNodesMaxAlloc )
+ {
+// printf( "Resizing data structures. Old size = %6d. New size = %6d.\n", p->nNodesMaxAlloc, p->nNodesCur );
+ reoResizeStructures( p, 0, p->nNodesCur, 0 );
+ }
+ }
+ }
+ // fix the plane index
+ if ( q == -1 )
+ q++;
+ // now q points to the position of this var in the order
+
+ // move down
+ for ( ; q < p->nSupp-1; )
+ {
+ CostCurrent -= reoReorderSwapAdjacentVars( p, q, 0 );
+ q++; // change q to point to the position of this var in the order
+ // sanity check: the number of nodes on the back pass should be the same
+ if ( p->pVarCosts[q] != REO_HIGH_VALUE && fabs( p->pVarCosts[q] - CostCurrent ) > REO_COST_EPSILON )
+ printf("reoReorderSift(): Error! On the backward move, the costs are different.\n");
+ p->pVarCosts[q] = CostCurrent;
+ // update the lower bound (assuming that for level q-1 it is set correctly)
+ p->pPlanes[q].statsCostAbove = p->pPlanes[q-1].statsCostAbove + p->pPlanes[q-1].statsCost;
+ // check the bounds only if the variable already reached its previous position
+ if ( q >= BestQ )
+ {
+ // check the upper bound
+ if ( CostCurrent >= CostLimit )
+ break;
+ // check the lower bound
+ if ( p->pPlanes[q].statsCostAbove + (REO_QUAL_PAR-1)*p->pPlanes[q].statsCostBelow/REO_QUAL_PAR >= CostBest )
+ break;
+ }
+ // update the best cost
+ if ( CostBest >= CostCurrent )
+ {
+ CostBest = CostCurrent;
+ BestQ = q;
+ // adjust node limit
+ CostLimit = ddMin( CostLimit, 1 + (int)(REO_REORDER_LIMIT * CostCurrent) );
+ }
+
+ // when we are reordering for width or APL, it may happen that
+ // the number of nodes has grown above certain limit,
+ // in which case we have to resize the data structures
+ if ( p->fMinWidth || p->fMinApl )
+ {
+ if ( p->nNodesCur >= 2 * p->nNodesMaxAlloc )
+ {
+// printf( "Resizing data structures. Old size = %6d. New size = %6d.\n", p->nNodesMaxAlloc, p->nNodesCur );
+ reoResizeStructures( p, 0, p->nNodesCur, 0 );
+ }
+ }
+ }
+ // move the variable up from the given position (q) to the best position (BestQ)
+ assert( q >= BestQ );
+ for ( ; q > BestQ; q-- )
+ {
+ CostCurrent -= reoReorderSwapAdjacentVars( p, q-1, 1 );
+ // sanity check: the number of nodes on the back pass should be the same
+ if ( fabs( p->pVarCosts[q-1] - CostCurrent ) > REO_COST_EPSILON )
+ {
+ printf("reoReorderSift(): Error! On the return move, the costs are different.\n" );
+ fflush(stdout);
+ }
+ }
+ }
+ else // move down first, then up
+ {
+ // set the current number of nodes on all levels above the given level
+ p->pPlanes[0].statsCostAbove = 0;
+ for ( v = 1; v <= VarCurrent; v++ )
+ p->pPlanes[v].statsCostAbove = p->pPlanes[v-1].statsCostAbove + p->pPlanes[v-1].statsCost;
+ // set the current number of nodes on all levels below the given level
+ p->pPlanes[p->nSupp].statsCostBelow = 0;
+ for ( v = p->nSupp - 1; v >= VarCurrent; v-- )
+ p->pPlanes[v].statsCostBelow = p->pPlanes[v+1].statsCostBelow + p->pPlanes[v+1].statsCost;
+
+ assert( CostCurrent == p->pPlanes[VarCurrent].statsCostAbove +
+ p->pPlanes[VarCurrent].statsCost +
+ p->pPlanes[VarCurrent].statsCostBelow );
+
+ // move down
+ for ( q = VarCurrent; q < p->nSupp-1; )
+ {
+ CostCurrent -= reoReorderSwapAdjacentVars( p, q, 0 );
+ q++; // change q to point to the position of this var in the order
+ p->pVarCosts[q] = CostCurrent;
+ // update the lower bound (assuming that for level q-1 it is set correctly)
+ p->pPlanes[q].statsCostAbove = p->pPlanes[q-1].statsCostAbove + p->pPlanes[q-1].statsCost;
+ // check the upper bound
+ if ( CostCurrent >= CostLimit )
+ break;
+ // check the lower bound
+ if ( p->pPlanes[q].statsCostAbove + (REO_QUAL_PAR-1)*p->pPlanes[q].statsCostBelow/REO_QUAL_PAR >= CostBest )
+ break;
+ // update the best cost
+ if ( CostBest > CostCurrent )
+ {
+ CostBest = CostCurrent;
+ BestQ = q;
+ // adjust node limit
+ CostLimit = ddMin( CostLimit, 1 + (int)(REO_REORDER_LIMIT * CostCurrent) );
+ }
+
+ // when we are reordering for width or APL, it may happen that
+ // the number of nodes has grown above certain limit,
+ // in which case we have to resize the data structures
+ if ( p->fMinWidth || p->fMinApl )
+ {
+ if ( p->nNodesCur >= 2 * p->nNodesMaxAlloc )
+ {
+// printf( "Resizing data structures. Old size = %6d. New size = %6d.\n", p->nNodesMaxAlloc, p->nNodesCur );
+ reoResizeStructures( p, 0, p->nNodesCur, 0 );
+ }
+ }
+ }
+
+ // move up
+ for ( --q; q >= 0; q-- )
+ {
+ CostCurrent -= reoReorderSwapAdjacentVars( p, q, 1 );
+ // now q points to the position of this var in the order
+ // sanity check: the number of nodes on the back pass should be the same
+ if ( p->pVarCosts[q] != REO_HIGH_VALUE && fabs( p->pVarCosts[q] - CostCurrent ) > REO_COST_EPSILON )
+ printf("reoReorderSift(): Error! On the backward move, the costs are different.\n");
+ p->pVarCosts[q] = CostCurrent;
+ // update the lower bound (assuming that for level q+1 it is set correctly)
+ p->pPlanes[q].statsCostBelow = p->pPlanes[q+1].statsCostBelow + p->pPlanes[q+1].statsCost;
+ // check the bounds only if the variable already reached its previous position
+ if ( q <= BestQ )
+ {
+ // check the upper bound
+ if ( CostCurrent >= CostLimit )
+ break;
+ // check the lower bound
+ if ( p->pPlanes[q].statsCostBelow + (REO_QUAL_PAR-1)*p->pPlanes[q].statsCostAbove/REO_QUAL_PAR >= CostBest )
+ break;
+ }
+ // update the best cost
+ if ( CostBest >= CostCurrent )
+ {
+ CostBest = CostCurrent;
+ BestQ = q;
+ // adjust node limit
+ CostLimit = ddMin( CostLimit, 1 + (int)(REO_REORDER_LIMIT * CostCurrent) );
+ }
+
+ // when we are reordering for width or APL, it may happen that
+ // the number of nodes has grown above certain limit,
+ // in which case we have to resize the data structures
+ if ( p->fMinWidth || p->fMinApl )
+ {
+ if ( p->nNodesCur >= 2 * p->nNodesMaxAlloc )
+ {
+// printf( "Resizing data structures. Old size = %6d. New size = %6d.\n", p->nNodesMaxAlloc, p->nNodesCur );
+ reoResizeStructures( p, 0, p->nNodesCur, 0 );
+ }
+ }
+ }
+ // fix the plane index
+ if ( q == -1 )
+ q++;
+ // now q points to the position of this var in the order
+ // move the variable down from the given position (q) to the best position (BestQ)
+ assert( q <= BestQ );
+ for ( ; q < BestQ; q++ )
+ {
+ CostCurrent -= reoReorderSwapAdjacentVars( p, q, 0 );
+ // sanity check: the number of nodes on the back pass should be the same
+ if ( fabs( p->pVarCosts[q+1] - CostCurrent ) > REO_COST_EPSILON )
+ {
+ printf("reoReorderSift(): Error! On the return move, the costs are different.\n" );
+ fflush(stdout);
+ }
+ }
+ }
+ assert( fabs( CostBest - CostCurrent ) < REO_COST_EPSILON );
+
+ // update the cost
+ if ( p->fMinWidth )
+ p->nWidthCur = (int)CostBest;
+ else if ( p->fMinApl )
+ p->nAplCur = CostCurrent;
+ else
+ p->nNodesCur = (int)CostBest;
+ }
+
+ // remove the sifted attributes if any
+ for ( v = 0; v < p->nSupp; v++ )
+ p->pPlanes[v].fSifted = 0;
+}
+
+////////////////////////////////////////////////////////////////////////
+/// END OF FILE ///
+////////////////////////////////////////////////////////////////////////
+
diff --git a/src/bdd/reo/reoSwap.c b/src/bdd/reo/reoSwap.c
new file mode 100644
index 00000000..cb730d8e
--- /dev/null
+++ b/src/bdd/reo/reoSwap.c
@@ -0,0 +1,898 @@
+/**CFile****************************************************************
+
+ FileName [reoSwap.c]
+
+ PackageName [REO: A specialized DD reordering engine.]
+
+ Synopsis [Implementation of the two-variable swap.]
+
+ Author [Alan Mishchenko]
+
+ Affiliation [UC Berkeley]
+
+ Date [Ver. 1.0. Started - October 15, 2002.]
+
+ Revision [$Id: reoSwap.c,v 1.0 2002/15/10 03:00:00 alanmi Exp $]
+
+***********************************************************************/
+
+#include "reo.h"
+
+////////////////////////////////////////////////////////////////////////
+/// DECLARATIONS ///
+////////////////////////////////////////////////////////////////////////
+
+#define AddToLinkedList( ppList, pLink ) (((pLink)->Next = *(ppList)), (*(ppList) = (pLink)))
+
+////////////////////////////////////////////////////////////////////////
+/// FUNCTION DEFINITIONS ///
+////////////////////////////////////////////////////////////////////////
+
+/**Function*************************************************************
+
+ Synopsis []
+
+ Description [Takes the level (lev0) of the plane, which should be swapped
+ with the next plane. Returns the gain using the current cost function.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+double reoReorderSwapAdjacentVars( reo_man * p, int lev0, int fMovingUp )
+{
+ // the levels in the decision diagram
+ int lev1 = lev0 + 1, lev2 = lev0 + 2;
+ // the new nodes on lev0
+ reo_unit * pLoop, * pUnit;
+ // the new nodes on lev1
+ reo_unit * pNewPlane20, * pNewPlane21, * pNewPlane20R;
+ reo_unit * pUnitE, * pUnitER, * pUnitT;
+ // the nodes below lev1
+ reo_unit * pNew1E, * pNew1T, * pNew2E, * pNew2T;
+ reo_unit * pNew1ER, * pNew2ER;
+ // the old linked lists
+ reo_unit * pListOld0 = p->pPlanes[lev0].pHead;
+ reo_unit * pListOld1 = p->pPlanes[lev1].pHead;
+ // working planes and one more temporary plane
+ reo_unit * pListNew0 = NULL, ** ppListNew0 = &pListNew0;
+ reo_unit * pListNew1 = NULL, ** ppListNew1 = &pListNew1;
+ reo_unit * pListTemp = NULL, ** ppListTemp = &pListTemp;
+ // various integer variables
+ int fComp, fCompT, fFound, nWidthCofs, HKey, fInteract, temp, c;
+ // statistical variables
+ int nNodesUpMovedDown = 0;
+ int nNodesDownMovedUp = 0;
+ int nNodesUnrefRemoved = 0;
+ int nNodesUnrefAdded = 0;
+ int nWidthReduction = 0;
+ double AplWeightTotalLev0;
+ double AplWeightTotalLev1;
+ double AplWeightHalf;
+ double AplWeightPrev;
+ double AplWeightAfter;
+ double nCostGain;
+
+ // set the old lists
+ assert( lev0 >= 0 && lev1 < p->nSupp );
+ pListOld0 = p->pPlanes[lev0].pHead;
+ pListOld1 = p->pPlanes[lev1].pHead;
+
+ // make sure the planes have nodes
+ assert( p->pPlanes[lev0].statsNodes && p->pPlanes[lev1].statsNodes );
+ assert( pListOld0 && pListOld1 );
+
+ if ( p->fMinWidth )
+ {
+ // verify that the width parameters are set correctly
+ reoProfileWidthVerifyLevel( p->pPlanes + lev0, lev0 );
+ reoProfileWidthVerifyLevel( p->pPlanes + lev1, lev1 );
+ // start the storage for cofactors
+ nWidthCofs = 0;
+ }
+ else if ( p->fMinApl )
+ {
+ AplWeightPrev = p->nAplCur;
+ AplWeightAfter = p->nAplCur;
+ AplWeightTotalLev0 = 0.0;
+ AplWeightTotalLev1 = 0.0;
+ }
+
+ // check if the planes interact
+ fInteract = 0; // assume that they do not interact
+ for ( pUnit = pListOld0; pUnit; pUnit = pUnit->Next )
+ {
+ if ( pUnit->pT->lev == lev1 || Unit_Regular(pUnit->pE)->lev == lev1 )
+ {
+ fInteract = 1;
+ break;
+ }
+ // change the level now, this is done for efficiency reasons
+ pUnit->lev = lev1;
+ }
+
+ // set the new signature for hashing
+ p->nSwaps++;
+ if ( !fInteract )
+// if ( 0 )
+ {
+ // perform the swap without interaction
+ p->nNISwaps++;
+
+ // change the levels
+ if ( p->fMinWidth )
+ {
+ // go through the current lower level, which will become upper
+ for ( pUnit = pListOld1; pUnit; pUnit = pUnit->Next )
+ {
+ pUnit->lev = lev0;
+
+ pUnitER = Unit_Regular(pUnit->pE);
+ if ( pUnitER->TopRef > lev0 )
+ {
+ if ( pUnitER->Sign != p->nSwaps )
+ {
+ if ( pUnitER->TopRef == lev2 )
+ {
+ pUnitER->TopRef = lev1;
+ nWidthReduction--;
+ }
+ else
+ {
+ assert( pUnitER->TopRef == lev1 );
+ }
+ pUnitER->Sign = p->nSwaps;
+ }
+ }
+
+ pUnitT = pUnit->pT;
+ if ( pUnitT->TopRef > lev0 )
+ {
+ if ( pUnitT->Sign != p->nSwaps )
+ {
+ if ( pUnitT->TopRef == lev2 )
+ {
+ pUnitT->TopRef = lev1;
+ nWidthReduction--;
+ }
+ else
+ {
+ assert( pUnitT->TopRef == lev1 );
+ }
+ pUnitT->Sign = p->nSwaps;
+ }
+ }
+
+ }
+
+ // go through the current upper level, which will become lower
+ for ( pUnit = pListOld0; pUnit; pUnit = pUnit->Next )
+ {
+ pUnit->lev = lev1;
+
+ pUnitER = Unit_Regular(pUnit->pE);
+ if ( pUnitER->TopRef > lev0 )
+ {
+ if ( pUnitER->Sign != p->nSwaps )
+ {
+ assert( pUnitER->TopRef == lev1 );
+ pUnitER->TopRef = lev2;
+ pUnitER->Sign = p->nSwaps;
+ nWidthReduction++;
+ }
+ }
+
+ pUnitT = pUnit->pT;
+ if ( pUnitT->TopRef > lev0 )
+ {
+ if ( pUnitT->Sign != p->nSwaps )
+ {
+ assert( pUnitT->TopRef == lev1 );
+ pUnitT->TopRef = lev2;
+ pUnitT->Sign = p->nSwaps;
+ nWidthReduction++;
+ }
+ }
+ }
+ }
+ else
+ {
+// for ( pUnit = pListOld0; pUnit; pUnit = pUnit->Next )
+// pUnit->lev = lev1;
+ for ( pUnit = pListOld1; pUnit; pUnit = pUnit->Next )
+ pUnit->lev = lev0;
+ }
+
+ // set the new linked lists, which will be attached to the planes
+ pListNew0 = pListOld1;
+ pListNew1 = pListOld0;
+
+ if ( p->fMinApl )
+ {
+ AplWeightTotalLev0 = p->pPlanes[lev1].statsCost;
+ AplWeightTotalLev1 = p->pPlanes[lev0].statsCost;
+ }
+
+ // set the changes in terms of nodes
+ nNodesUpMovedDown = p->pPlanes[lev0].statsNodes;
+ nNodesDownMovedUp = p->pPlanes[lev1].statsNodes;
+ goto finish;
+ }
+ p->Signature++;
+
+
+ // two-variable swap is done in three easy steps
+	// previously I thought that steps (1) and (2) could be merged into one step;
+	// it is now clear that this cannot be done without changing a lot of other code...
+
+ // (1) walk through the upper level, find units without cofactors in the lower level
+ // and move them to the new lower level (while adding to the cache)
+	// (2) walk through the upper level and transform all the remaining nodes
+	//     while employing the cache for the new lower level
+	// (3) walk through the old lower level, find those nodes whose ref counters are not zero,
+	//     and move them to the new upper level; free the other nodes
+
+ // (1) walk through the upper level, find units without cofactors in the lower level
+ // and move them to the new lower level (while adding to the cache)
+ for ( pLoop = pListOld0; pLoop; )
+ {
+ pUnit = pLoop;
+ pLoop = pLoop->Next;
+
+ pUnitE = pUnit->pE;
+ pUnitER = Unit_Regular(pUnitE);
+ pUnitT = pUnit->pT;
+
+ if ( pUnitER->lev != lev1 && pUnitT->lev != lev1 )
+ {
+ // before after
+ //
+ // <p1>
+ // 0 / \ 1
+ // / \
+ // / \
+ // / \ <p2n>
+ // / \ 0 / \ 1
+ // / \ / \
+ // / \ / \
+ // F0 F1 F0 F1
+
+ // move to plane-2-new
+ // nothing changes in the process (cofactors, ref counter, APL weight)
+ pUnit->lev = lev1;
+ AddToLinkedList( ppListNew1, pUnit );
+ if ( p->fMinApl )
+ AplWeightTotalLev1 += pUnit->Weight;
+
+ // add to cache - find the cell with different signature (not the current one!)
+ for ( HKey = hashKey3(p->Signature, pUnitE, pUnitT, p->nTableSize);
+ p->HTable[HKey].Sign == p->Signature;
+ HKey = (HKey+1) % p->nTableSize );
+ assert( p->HTable[HKey].Sign != p->Signature );
+ p->HTable[HKey].Sign = p->Signature;
+ p->HTable[HKey].Arg1 = (unsigned)pUnitE;
+ p->HTable[HKey].Arg2 = (unsigned)pUnitT;
+ p->HTable[HKey].Arg3 = (unsigned)pUnit;
+
+ nNodesUpMovedDown++;
+
+ if ( p->fMinWidth )
+ {
+				// update the cofactor's top ref
+ if ( pUnitER->TopRef > lev0 ) // the cofactor's top ref level is one of the current two levels
+ {
+ assert( pUnitER->TopRef == lev1 );
+ pUnitER->TopRefNew = lev2;
+ if ( pUnitER->Sign != p->nSwaps )
+ {
+ pUnitER->Sign = p->nSwaps; // set the current signature
+ p->pWidthCofs[ nWidthCofs++ ] = pUnitER;
+ }
+ }
+ if ( pUnitT->TopRef > lev0 ) // the cofactor's top ref level is one of the current two levels
+ {
+ assert( pUnitT->TopRef == lev1 );
+ pUnitT->TopRefNew = lev2;
+ if ( pUnitT->Sign != p->nSwaps )
+ {
+ pUnitT->Sign = p->nSwaps; // set the current signature
+ p->pWidthCofs[ nWidthCofs++ ] = pUnitT;
+ }
+ }
+ }
+ }
+ else
+ {
+ // add to the temporary plane
+ AddToLinkedList( ppListTemp, pUnit );
+ }
+ }
+
+
+	// (2) walk through the upper level and transform all the remaining nodes
+ // while employing cache for the new lower level
+ for ( pLoop = pListTemp; pLoop; )
+ {
+ pUnit = pLoop;
+ pLoop = pLoop->Next;
+
+ pUnitE = pUnit->pE;
+ pUnitER = Unit_Regular(pUnitE);
+ pUnitT = pUnit->pT;
+ fComp = (int)(pUnitER != pUnitE);
+
+		// compute half of this node's weight, used to adjust the APL weight of its cofactors
+ if ( p->fMinApl )
+ AplWeightHalf = 0.5 * pUnit->Weight;
+
+ // determine what situation is this
+ if ( pUnitER->lev == lev1 && pUnitT->lev == lev1 )
+ {
+ if ( fComp == 0 )
+ {
+ // before after
+ //
+ // <p1> <p1n>
+ // 0 / \ 1 0 / \ 1
+ // / \ / \
+ // / \ / \
+ // <p2> <p2> <p2n> <p2n>
+ // 0 / \ 1 0 / \ 1 0 / \ 1 0 / \ 1
+ // / \ / \ / \ / \
+ // / \ / \ / \ / \
+ // F0 F1 F2 F3 F0 F2 F1 F3
+ // pNew1E pNew1T pNew2E pNew2T
+ //
+ pNew1E = pUnitE->pE; // F0
+ pNew1T = pUnitT->pE; // F2
+
+ pNew2E = pUnitE->pT; // F1
+ pNew2T = pUnitT->pT; // F3
+ }
+ else
+ {
+ // before after
+ //
+ // <p1> <p1n>
+ // 0 . \ 1 0 / \ 1
+ // . \ / \
+ // . \ / \
+ // <p2> <p2> <p2n> <p2n>
+ // 0 / \ 1 0 / \ 1 0 . \ 1 0 . \ 1
+ // / \ / \ . \ . \
+ // / \ / \ . \ . \
+ // F0 F1 F2 F3 F0 F2 F1 F3
+ // pNew1E pNew1T pNew2E pNew2T
+ //
+ pNew1E = Unit_Not(pUnitER->pE); // F0
+ pNew1T = pUnitT->pE; // F2
+
+ pNew2E = Unit_Not(pUnitER->pT); // F1
+ pNew2T = pUnitT->pT; // F3
+ }
+ // subtract ref counters - on the level P2
+ pUnitER->n--;
+ pUnitT->n--;
+
+ // mark the change in the APL weights
+ if ( p->fMinApl )
+ {
+ pUnitER->Weight -= AplWeightHalf;
+ pUnitT->Weight -= AplWeightHalf;
+ AplWeightAfter -= pUnit->Weight;
+ }
+ }
+ else if ( pUnitER->lev == lev1 )
+ {
+ if ( fComp == 0 )
+ {
+ // before after
+ //
+ // <p1> <p1n>
+ // 0 / \ 1 0 / \ 1
+ // / \ / \
+ // / \ / \
+ // <p2> \ <p2n> <p2n>
+ // 0 / \ 1 \ 0 / \ 1 0 / \ 1
+ // / \ \ / \ / \
+ // / \ \ / \ / \
+ // F0 F1 F3 F0 F3 F1 F3
+ // pNew1E pNew1T pNew2E pNew2T
+ //
+ pNew1E = pUnitER->pE; // F0
+ pNew1T = pUnitT; // F3
+
+ pNew2E = pUnitER->pT; // F1
+ pNew2T = pUnitT; // F3
+ }
+ else
+ {
+ // before after
+ //
+ // <p1> <p1n>
+ // 0 . \ 1 0 / \ 1
+ // . \ / \
+ // . \ / \
+ // <p2> \ <p2n> <p2n>
+ // 0 / \ 1 \ 0 . \ 1 0 . \ 1
+ // / \ \ . \ . \
+ // / \ \ . \ . \
+ // F0 F1 F3 F0 F3 F1 F3
+ // pNew1E pNew1T pNew2E pNew2T
+ //
+ pNew1E = Unit_Not(pUnitER->pE); // F0
+ pNew1T = pUnitT; // F3
+
+ pNew2E = Unit_Not(pUnitER->pT); // F1
+ pNew2T = pUnitT; // F3
+ }
+ // subtract ref counter - on the level P2
+ pUnitER->n--;
+ // subtract ref counter - on other levels
+ pUnitT->n--; ///
+
+ // mark the change in the APL weights
+ if ( p->fMinApl )
+ {
+ pUnitER->Weight -= AplWeightHalf;
+ AplWeightAfter -= AplWeightHalf;
+ }
+ }
+ else if ( pUnitT->lev == lev1 )
+ {
+ // before after
+ //
+ // <p1> <p1n>
+ // 0 / \ 1 0 / \ 1
+ // / \ / \
+ // / \ / \
+ // / <p2> <p2n> <p2n>
+ // / 0 / \ 1 0 / \ 1 0 / \ 1
+ // / / \ / \ / \
+ // / / \ / \ / \
+ // F0 F2 F3 F0 F2 F0 F3
+ // pNew1E pNew1T pNew2E pNew2T
+ //
+ pNew1E = pUnitE; // F0
+ pNew1T = pUnitT->pE; // F2
+
+ pNew2E = pUnitE; // F0
+ pNew2T = pUnitT->pT; // F3
+
+ // subtract incoming edge counter - on the level P2
+ pUnitT->n--;
+ // subtract ref counter - on other levels
+ pUnitER->n--; ///
+
+ // mark the change in the APL weights
+ if ( p->fMinApl )
+ {
+ pUnitT->Weight -= AplWeightHalf;
+ AplWeightAfter -= AplWeightHalf;
+ }
+ }
+ else
+ {
+ assert( 0 ); // should never happen
+ }
+
+
+ // consider all the cases except the last one
+ if ( pNew1E == pNew1T )
+ {
+ pNewPlane20 = pNew1T;
+
+ if ( p->fMinWidth )
+ {
+				// update the cofactor's top ref
+ if ( pNew1T->TopRef > lev0 ) // the cofactor's top ref level is one of the current two levels
+ {
+ pNew1T->TopRefNew = lev1;
+ if ( pNew1T->Sign != p->nSwaps )
+ {
+ pNew1T->Sign = p->nSwaps; // set the current signature
+ p->pWidthCofs[ nWidthCofs++ ] = pNew1T;
+ }
+ }
+ }
+ }
+ else
+ {
+ // pNew1T can be complemented
+ fCompT = Cudd_IsComplement(pNew1T);
+ if ( fCompT )
+ {
+ pNew1E = Unit_Not(pNew1E);
+ pNew1T = Unit_Not(pNew1T);
+ }
+
+ // check the hash-table
+ fFound = 0;
+ for ( HKey = hashKey3(p->Signature, pNew1E, pNew1T, p->nTableSize);
+ p->HTable[HKey].Sign == p->Signature;
+ HKey = (HKey+1) % p->nTableSize )
+ if ( p->HTable[HKey].Arg1 == (unsigned)pNew1E && p->HTable[HKey].Arg2 == (unsigned)pNew1T )
+ { // the entry is present
+ // assign this entry
+ pNewPlane20 = (reo_unit *)p->HTable[HKey].Arg3;
+ assert( pNewPlane20->lev == lev1 );
+ fFound = 1;
+ p->HashSuccess++;
+ break;
+ }
+
+ if ( !fFound )
+ { // create the new entry
+ pNewPlane20 = reoUnitsGetNextUnit( p ); // increments the unit counter
+ pNewPlane20->pE = pNew1E;
+ pNewPlane20->pT = pNew1T;
+ pNewPlane20->n = 0; // ref will be added later
+ pNewPlane20->lev = lev1;
+ if ( p->fMinWidth )
+ {
+ pNewPlane20->TopRef = lev1;
+ pNewPlane20->Sign = 0;
+ }
+ // set the weight of this node
+ if ( p->fMinApl )
+ pNewPlane20->Weight = 0.0;
+
+ // increment ref counters of children
+ pNew1ER = Unit_Regular(pNew1E);
+ pNew1ER->n++; //
+ pNew1T->n++; //
+
+ // insert into the data structure
+ AddToLinkedList( ppListNew1, pNewPlane20 );
+
+ // add this entry to cache
+ assert( p->HTable[HKey].Sign != p->Signature );
+ p->HTable[HKey].Sign = p->Signature;
+ p->HTable[HKey].Arg1 = (unsigned)pNew1E;
+ p->HTable[HKey].Arg2 = (unsigned)pNew1T;
+ p->HTable[HKey].Arg3 = (unsigned)pNewPlane20;
+
+ nNodesUnrefAdded++;
+
+ if ( p->fMinWidth )
+ {
+					// update the cofactor's top ref
+ if ( pNew1ER->TopRef > lev0 ) // the cofactor's top ref level is one of the current two levels
+ {
+ if ( pNew1ER->Sign != p->nSwaps )
+ {
+ pNew1ER->TopRefNew = lev2;
+ if ( pNew1ER->Sign != p->nSwaps )
+ {
+ pNew1ER->Sign = p->nSwaps; // set the current signature
+ p->pWidthCofs[ nWidthCofs++ ] = pNew1ER;
+ }
+ }
+ // otherwise the level is already set correctly
+ else
+ {
+ assert( pNew1ER->TopRefNew == lev1 || pNew1ER->TopRefNew == lev2 );
+ }
+ }
+					// update the cofactor's top ref
+ if ( pNew1T->TopRef > lev0 ) // the cofactor's top ref level is one of the current two levels
+ {
+ if ( pNew1T->Sign != p->nSwaps )
+ {
+ pNew1T->TopRefNew = lev2;
+ if ( pNew1T->Sign != p->nSwaps )
+ {
+ pNew1T->Sign = p->nSwaps; // set the current signature
+ p->pWidthCofs[ nWidthCofs++ ] = pNew1T;
+ }
+ }
+ // otherwise the level is already set correctly
+ else
+ {
+ assert( pNew1T->TopRefNew == lev1 || pNew1T->TopRefNew == lev2 );
+ }
+ }
+ }
+ }
+
+ if ( p->fMinApl )
+ {
+ // increment the weight of this node
+ pNewPlane20->Weight += AplWeightHalf;
+ // mark the change in the APL weight
+ AplWeightAfter += AplWeightHalf;
+ // update the total weight of this level
+ AplWeightTotalLev1 += AplWeightHalf;
+ }
+
+ if ( fCompT )
+ pNewPlane20 = Unit_Not(pNewPlane20);
+ }
+
+ if ( pNew2E == pNew2T )
+ {
+ pNewPlane21 = pNew2T;
+
+ if ( p->fMinWidth )
+ {
+				// update the cofactor's top ref
+ if ( pNew2T->TopRef > lev0 ) // the cofactor's top ref level is one of the current two levels
+ {
+ pNew2T->TopRefNew = lev1;
+ if ( pNew2T->Sign != p->nSwaps )
+ {
+ pNew2T->Sign = p->nSwaps; // set the current signature
+ p->pWidthCofs[ nWidthCofs++ ] = pNew2T;
+ }
+ }
+ }
+ }
+ else
+ {
+ assert( !Cudd_IsComplement(pNew2T) );
+
+ // check the hash-table
+ fFound = 0;
+ for ( HKey = hashKey3(p->Signature, pNew2E, pNew2T, p->nTableSize);
+ p->HTable[HKey].Sign == p->Signature;
+ HKey = (HKey+1) % p->nTableSize )
+ if ( p->HTable[HKey].Arg1 == (unsigned)pNew2E && p->HTable[HKey].Arg2 == (unsigned)pNew2T )
+ { // the entry is present
+ // assign this entry
+ pNewPlane21 = (reo_unit *)p->HTable[HKey].Arg3;
+ assert( pNewPlane21->lev == lev1 );
+ fFound = 1;
+ p->HashSuccess++;
+ break;
+ }
+
+ if ( !fFound )
+ { // create the new entry
+ pNewPlane21 = reoUnitsGetNextUnit( p ); // increments the unit counter
+ pNewPlane21->pE = pNew2E;
+ pNewPlane21->pT = pNew2T;
+ pNewPlane21->n = 0; // ref will be added later
+ pNewPlane21->lev = lev1;
+ if ( p->fMinWidth )
+ {
+ pNewPlane21->TopRef = lev1;
+ pNewPlane21->Sign = 0;
+ }
+ // set the weight of this node
+ if ( p->fMinApl )
+ pNewPlane21->Weight = 0.0;
+
+ // increment ref counters of children
+ pNew2ER = Unit_Regular(pNew2E);
+ pNew2ER->n++; //
+ pNew2T->n++; //
+
+ // insert into the data structure
+// reoUnitsAddUnitToPlane( &P2new, pNewPlane21 );
+ AddToLinkedList( ppListNew1, pNewPlane21 );
+
+ // add this entry to cache
+ assert( p->HTable[HKey].Sign != p->Signature );
+ p->HTable[HKey].Sign = p->Signature;
+ p->HTable[HKey].Arg1 = (unsigned)pNew2E;
+ p->HTable[HKey].Arg2 = (unsigned)pNew2T;
+ p->HTable[HKey].Arg3 = (unsigned)pNewPlane21;
+
+ nNodesUnrefAdded++;
+
+
+ if ( p->fMinWidth )
+ {
+					// update the cofactor's top ref
+ if ( pNew2ER->TopRef > lev0 ) // the cofactor's top ref level is one of the current two levels
+ {
+ if ( pNew2ER->Sign != p->nSwaps )
+ {
+ pNew2ER->TopRefNew = lev2;
+ if ( pNew2ER->Sign != p->nSwaps )
+ {
+ pNew2ER->Sign = p->nSwaps; // set the current signature
+ p->pWidthCofs[ nWidthCofs++ ] = pNew2ER;
+ }
+ }
+ // otherwise the level is already set correctly
+ else
+ {
+ assert( pNew2ER->TopRefNew == lev1 || pNew2ER->TopRefNew == lev2 );
+ }
+ }
+					// update the cofactor's top ref
+ if ( pNew2T->TopRef > lev0 ) // the cofactor's top ref level is one of the current two levels
+ {
+ if ( pNew2T->Sign != p->nSwaps )
+ {
+ pNew2T->TopRefNew = lev2;
+ if ( pNew2T->Sign != p->nSwaps )
+ {
+ pNew2T->Sign = p->nSwaps; // set the current signature
+ p->pWidthCofs[ nWidthCofs++ ] = pNew2T;
+ }
+ }
+ // otherwise the level is already set correctly
+ else
+ {
+ assert( pNew2T->TopRefNew == lev1 || pNew2T->TopRefNew == lev2 );
+ }
+ }
+ }
+ }
+
+ if ( p->fMinApl )
+ {
+ // increment the weight of this node
+ pNewPlane21->Weight += AplWeightHalf;
+ // mark the change in the APL weight
+ AplWeightAfter += AplWeightHalf;
+ // update the total weight of this level
+ AplWeightTotalLev1 += AplWeightHalf;
+ }
+ }
+ // in all cases, the node will be added to the plane-1
+ // this should be the same node (pUnit) as was originally there
+ // because it is referenced by the above nodes
+
+ assert( !Cudd_IsComplement(pNewPlane21) );
+ // should be the case; otherwise reordering is not a local operation
+
+ pUnit->pE = pNewPlane20;
+ pUnit->pT = pNewPlane21;
+ assert( pUnit->lev == lev0 );
+ // reference counter remains the same; the APL weight remains the same
+
+ // increment ref counters of children
+ pNewPlane20R = Unit_Regular(pNewPlane20);
+ pNewPlane20R->n++; ///
+ pNewPlane21->n++; ///
+
+ // insert into the data structure
+ AddToLinkedList( ppListNew0, pUnit );
+ if ( p->fMinApl )
+ AplWeightTotalLev0 += pUnit->Weight;
+ }
+
+ // (3) walk through the old lower level, find those nodes whose ref counters are not zero,
+	//     and move them to the new upper level; free the other nodes
+ for ( pLoop = pListOld1; pLoop; )
+ {
+ pUnit = pLoop;
+ pLoop = pLoop->Next;
+ if ( pUnit->n )
+ {
+ assert( !p->fMinApl || pUnit->Weight > 0.0 );
+ // the node should be added to the new level
+ // no need to check the hash table
+ pUnit->lev = lev0;
+ AddToLinkedList( ppListNew0, pUnit );
+ if ( p->fMinApl )
+ AplWeightTotalLev0 += pUnit->Weight;
+
+ nNodesDownMovedUp++;
+
+ if ( p->fMinWidth )
+ {
+ pUnitER = Unit_Regular(pUnit->pE);
+ pUnitT = pUnit->pT;
+
+				// update the cofactor's top ref
+ if ( pUnitER->TopRef > lev0 ) // the cofactor's top ref level is one of the current two levels
+ {
+ pUnitER->TopRefNew = lev1;
+ if ( pUnitER->Sign != p->nSwaps )
+ {
+ pUnitER->Sign = p->nSwaps; // set the current signature
+ p->pWidthCofs[ nWidthCofs++ ] = pUnitER;
+ }
+ }
+ if ( pUnitT->TopRef > lev0 ) // the cofactor's top ref level is one of the current two levels
+ {
+ pUnitT->TopRefNew = lev1;
+ if ( pUnitT->Sign != p->nSwaps )
+ {
+ pUnitT->Sign = p->nSwaps; // set the current signature
+ p->pWidthCofs[ nWidthCofs++ ] = pUnitT;
+ }
+ }
+ }
+ }
+ else
+ {
+ assert( !p->fMinApl || pUnit->Weight == 0.0 );
+ // decrement reference counters of children
+ pUnitER = Unit_Regular(pUnit->pE);
+ pUnitT = pUnit->pT;
+ pUnitER->n--; ///
+ pUnitT->n--; ///
+ // the node should be thrown away
+ reoUnitsRecycleUnit( p, pUnit );
+ nNodesUnrefRemoved++;
+ }
+ }
+
+finish:
+
+ // attach the new levels to the planes
+ p->pPlanes[lev0].pHead = pListNew0;
+ p->pPlanes[lev1].pHead = pListNew1;
+
+ // swap the sift status
+ temp = p->pPlanes[lev0].fSifted;
+ p->pPlanes[lev0].fSifted = p->pPlanes[lev1].fSifted;
+ p->pPlanes[lev1].fSifted = temp;
+
+ // swap variables in the variable map
+ if ( p->pOrderInt )
+ {
+ temp = p->pOrderInt[lev0];
+ p->pOrderInt[lev0] = p->pOrderInt[lev1];
+ p->pOrderInt[lev1] = temp;
+ }
+
+ // adjust the node profile
+ p->pPlanes[lev0].statsNodes -= (nNodesUpMovedDown - nNodesDownMovedUp);
+ p->pPlanes[lev1].statsNodes -= (nNodesDownMovedUp - nNodesUpMovedDown) + nNodesUnrefRemoved - nNodesUnrefAdded;
+ p->nNodesCur -= nNodesUnrefRemoved - nNodesUnrefAdded;
+
+ // adjust the node profile on this level
+ if ( p->fMinWidth )
+ {
+ for ( c = 0; c < nWidthCofs; c++ )
+ {
+ if ( p->pWidthCofs[c]->TopRefNew < p->pWidthCofs[c]->TopRef )
+ {
+ p->pWidthCofs[c]->TopRef = p->pWidthCofs[c]->TopRefNew;
+ nWidthReduction--;
+ }
+ else if ( p->pWidthCofs[c]->TopRefNew > p->pWidthCofs[c]->TopRef )
+ {
+ p->pWidthCofs[c]->TopRef = p->pWidthCofs[c]->TopRefNew;
+ nWidthReduction++;
+ }
+ }
+ // verify that the profile is okay
+ reoProfileWidthVerifyLevel( p->pPlanes + lev0, lev0 );
+ reoProfileWidthVerifyLevel( p->pPlanes + lev1, lev1 );
+
+ // compute the total gain in terms of width
+ nCostGain = (nNodesDownMovedUp - nNodesUpMovedDown + nNodesUnrefRemoved - nNodesUnrefAdded) + nWidthReduction;
+ // adjust the width on this level
+ p->pPlanes[lev1].statsWidth -= (int)nCostGain;
+ // set the cost
+ p->pPlanes[lev1].statsCost = p->pPlanes[lev1].statsWidth;
+ }
+ else if ( p->fMinApl )
+ {
+ // compute the total gain in terms of APL
+ nCostGain = AplWeightPrev - AplWeightAfter;
+		// make sure that the APL is updated correctly
+// assert( p->pPlanes[lev0].statsCost + p->pPlanes[lev1].statsCost - nCostGain ==
+// AplWeightTotalLev0 + AplWeightTotalLev1 );
+ // adjust the profile
+ p->pPlanes[lev0].statsApl = AplWeightTotalLev0;
+ p->pPlanes[lev1].statsApl = AplWeightTotalLev1;
+ // set the cost
+ p->pPlanes[lev0].statsCost = p->pPlanes[lev0].statsApl;
+ p->pPlanes[lev1].statsCost = p->pPlanes[lev1].statsApl;
+ }
+ else
+ {
+ // compute the total gain in terms of the number of nodes
+ nCostGain = nNodesUnrefRemoved - nNodesUnrefAdded;
+ // adjust the profile (adjusted above)
+ // set the cost
+ p->pPlanes[lev0].statsCost = p->pPlanes[lev0].statsNodes;
+ p->pPlanes[lev1].statsCost = p->pPlanes[lev1].statsNodes;
+ }
+
+ return nCostGain;
+}
+
+////////////////////////////////////////////////////////////////////////
+/// END OF FILE ///
+////////////////////////////////////////////////////////////////////////
+
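The before/after pictures in reoReorderSwapAdjacentVars() above boil down to a fixed shuffle of the four cofactors F0..F3: the pair (F0,F1)/(F2,F3) under the old top variable becomes (F0,F2)/(F1,F3) under the new one. The standalone check below (illustrative only; it uses plain ints in place of reo_unit nodes) verifies that this shuffle preserves the represented function for every combination of cofactor values and variable assignments.

#include <assert.h>
#include <stdio.h>

int main( void )
{
    int F0, F1, F2, F3, x, y;
    for ( F0 = 0; F0 <= 1; F0++ )
    for ( F1 = 0; F1 <= 1; F1++ )
    for ( F2 = 0; F2 <= 1; F2++ )
    for ( F3 = 0; F3 <= 1; F3++ )
    for ( x = 0; x <= 1; x++ )
    for ( y = 0; y <= 1; y++ )
    {
        /* before the swap: the top node tests x, its children test y */
        int Before = x ? (y ? F3 : F2) : (y ? F1 : F0);
        /* after the swap: the top node tests y, its children test x  */
        int After  = y ? (x ? F3 : F1) : (x ? F2 : F0);
        assert( Before == After );
    }
    printf( "the cofactor shuffle preserves the function in all 64 cases\n" );
    return 0;
}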
diff --git a/src/bdd/reo/reoTest.c b/src/bdd/reo/reoTest.c
new file mode 100644
index 00000000..82f3d5f5
--- /dev/null
+++ b/src/bdd/reo/reoTest.c
@@ -0,0 +1,251 @@
+/**CFile****************************************************************
+
+ FileName [reoTest.c]
+
+ PackageName [REO: A specialized DD reordering engine.]
+
+ Synopsis [Various testing procedures (may be outdated).]
+
+ Author [Alan Mishchenko <alanmi@ece.pdx.edu>]
+
+ Affiliation [ECE Department. Portland State University, Portland, Oregon.]
+
+ Date [Ver. 1.0. Started - October 15, 2002.]
+
+ Revision [$Id: reoTest.c,v 1.0 2002/15/10 03:00:00 alanmi Exp $]
+
+***********************************************************************/
+
+#include "reo.h"
+
+////////////////////////////////////////////////////////////////////////
+/// DECLARATIONS ///
+////////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////////
+/// FUNCTION DEFINITIONS ///
+////////////////////////////////////////////////////////////////////////
+
+/**Function*************************************************************
+
+ Synopsis [Reorders the DD using REO and CUDD.]
+
+ Description [This function can be used to test the performance of the reordering package.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void Extra_ReorderTest( DdManager * dd, DdNode * Func )
+{
+ reo_man * pReo;
+ DdNode * Temp, * Temp1;
+ int pOrder[1000];
+
+ pReo = Extra_ReorderInit( 100, 100 );
+
+//Extra_DumpDot( dd, &Func, 1, "beforReo.dot", 0 );
+ Temp = Extra_Reorder( pReo, dd, Func, pOrder ); Cudd_Ref( Temp );
+//Extra_DumpDot( dd, &Temp, 1, "afterReo.dot", 0 );
+
+ Temp1 = Extra_ReorderCudd(dd, Func, NULL ); Cudd_Ref( Temp1 );
+printf( "Initial = %d. Final = %d. Cudd = %d.\n", Cudd_DagSize(Func), Cudd_DagSize(Temp), Cudd_DagSize(Temp1) );
+ Cudd_RecursiveDeref( dd, Temp1 );
+ Cudd_RecursiveDeref( dd, Temp );
+
+ Extra_ReorderQuit( pReo );
+}
+
+
+/**Function*************************************************************
+
+ Synopsis [Reorders the DD using REO and CUDD.]
+
+ Description [This function can be used to test the performance of the reordering package.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void Extra_ReorderTestArray( DdManager * dd, DdNode * Funcs[], int nFuncs )
+{
+ reo_man * pReo;
+ DdNode * FuncsRes[1000];
+ int pOrder[1000];
+ int i;
+
+ pReo = Extra_ReorderInit( 100, 100 );
+ Extra_ReorderArray( pReo, dd, Funcs, FuncsRes, nFuncs, pOrder );
+ Extra_ReorderQuit( pReo );
+
+printf( "Initial = %d. Final = %d.\n", Cudd_SharingSize(Funcs,nFuncs), Cudd_SharingSize(FuncsRes,nFuncs) );
+
+ for ( i = 0; i < nFuncs; i++ )
+ Cudd_RecursiveDeref( dd, FuncsRes[i] );
+
+}
+
+/**Function*************************************************************
+
+ Synopsis [Reorders the DD using CUDD package.]
+
+ Description [Transfers the DD into a temporary manager in such a way
+ that the level correspondence is preserved. Reorders the manager
+ and transfers the DD back into the original manager using the topmost
+ levels of the manager, in such a way that the ordering of levels is
+ preserved. The resulting permutation is returned in the array
+ given by the user.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+DdNode * Extra_ReorderCudd( DdManager * dd, DdNode * aFunc, int pPermuteReo[] )
+{
+ static DdManager * ddReorder = NULL;
+ static int * Permute = NULL;
+ static int * PermuteReo1 = NULL;
+ static int * PermuteReo2 = NULL;
+ DdNode * aFuncReorder, * aFuncNew;
+ int lev, var;
+
+ // start the reordering manager
+ if ( ddReorder == NULL )
+ {
+ Permute = ALLOC( int, dd->size );
+ PermuteReo1 = ALLOC( int, dd->size );
+ PermuteReo2 = ALLOC( int, dd->size );
+ ddReorder = Cudd_Init( dd->size, 0, CUDD_UNIQUE_SLOTS, CUDD_CACHE_SLOTS, 0 );
+ Cudd_AutodynDisable(ddReorder);
+ }
+
+	// determine the permutation of variables to make sure that the var order in aFunc
+	// will not change when this function is transferred into the new manager
+ for ( lev = 0; lev < dd->size; lev++ )
+ {
+ Permute[ dd->invperm[lev] ] = ddReorder->invperm[lev];
+ PermuteReo1[ ddReorder->invperm[lev] ] = dd->invperm[lev];
+ }
+ // transfer this function into the new manager in such a way that ordering of vars does not change
+ aFuncReorder = Extra_TransferPermute( dd, ddReorder, aFunc, Permute ); Cudd_Ref( aFuncReorder );
+// assert( Cudd_DagSize(aFunc) == Cudd_DagSize(aFuncReorder) );
+
+ // perform the reordering
+printf( "Nodes before = %d.\n", Cudd_DagSize(aFuncReorder) );
+ Cudd_ReduceHeap( ddReorder, CUDD_REORDER_SYMM_SIFT, 1 );
+printf( "Nodes after = %d.\n", Cudd_DagSize(aFuncReorder) );
+
+ // determine the reverse variable permutation
+ for ( lev = 0; lev < dd->size; lev++ )
+ {
+ Permute[ ddReorder->invperm[lev] ] = dd->invperm[lev];
+ PermuteReo2[ dd->invperm[lev] ] = ddReorder->invperm[lev];
+ }
+
+ // transfer this function into the new manager in such a way that ordering of vars does not change
+ aFuncNew = Extra_TransferPermute( ddReorder, dd, aFuncReorder, Permute ); Cudd_Ref( aFuncNew );
+// assert( Cudd_DagSize(aFuncNew) == Cudd_DagSize(aFuncReorder) );
+ Cudd_RecursiveDeref( ddReorder, aFuncReorder );
+
+ // derive the resulting variable ordering
+ if ( pPermuteReo )
+ for ( var = 0; var < dd->size; var++ )
+ pPermuteReo[var] = PermuteReo1[ PermuteReo2[var] ];
+
+ Cudd_Deref( aFuncNew );
+ return aFuncNew;
+}
+
+
+/**Function*************************************************************
+
+ Synopsis []
+
+  Description [Transfers the BDD into another manager, minimizes it, and
+  returns the minimum number of nodes; disposes of the BDD in the new manager.
+  Useful for debugging or for comparing the performance of other reordering
+  procedures.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+int Extra_bddReorderTest( DdManager * dd, DdNode * bF )
+{
+ static DdManager * s_ddmin;
+ DdNode * bFmin;
+ int nNodes;
+// int clk1;
+
+ if ( s_ddmin == NULL )
+ s_ddmin = Cudd_Init( dd->size, 0, CUDD_UNIQUE_SLOTS, CUDD_CACHE_SLOTS, 0);
+
+// Cudd_ShuffleHeap( s_ddmin, dd->invperm );
+
+// clk1 = clock();
+ bFmin = Cudd_bddTransfer( dd, s_ddmin, bF ); Cudd_Ref( bFmin );
+ Cudd_ReduceHeap(s_ddmin,CUDD_REORDER_SIFT,1);
+// Cudd_ReduceHeap(s_ddmin,CUDD_REORDER_SYMM_SIFT,1);
+ nNodes = Cudd_DagSize( bFmin );
+ Cudd_RecursiveDeref( s_ddmin, bFmin );
+
+// printf( "Classical variable reordering time = %.2f sec\n", (float)(clock() - clk1)/(float)(CLOCKS_PER_SEC) );
+ return nNodes;
+}
+
+/**Function*************************************************************
+
+ Synopsis []
+
+  Description [Transfers the ADD into another manager, minimizes it, and
+  returns the minimum number of nodes; disposes of the ADD in the new manager.
+  Useful for debugging or for comparing the performance of other reordering
+  procedures.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+int Extra_addReorderTest( DdManager * dd, DdNode * aF )
+{
+ static DdManager * s_ddmin;
+ DdNode * bF;
+ DdNode * bFmin;
+ DdNode * aFmin;
+ int nNodesBeg;
+ int nNodesEnd;
+ int clk1;
+
+ if ( s_ddmin == NULL )
+ s_ddmin = Cudd_Init( dd->size, 0, CUDD_UNIQUE_SLOTS, CUDD_CACHE_SLOTS, 0);
+
+// Cudd_ShuffleHeap( s_ddmin, dd->invperm );
+
+ clk1 = clock();
+ bF = Cudd_addBddPattern( dd, aF ); Cudd_Ref( bF );
+ bFmin = Cudd_bddTransfer( dd, s_ddmin, bF ); Cudd_Ref( bFmin );
+ Cudd_RecursiveDeref( dd, bF );
+ aFmin = Cudd_BddToAdd( s_ddmin, bFmin ); Cudd_Ref( aFmin );
+ Cudd_RecursiveDeref( s_ddmin, bFmin );
+
+ nNodesBeg = Cudd_DagSize( aFmin );
+ Cudd_ReduceHeap(s_ddmin,CUDD_REORDER_SIFT,1);
+// Cudd_ReduceHeap(s_ddmin,CUDD_REORDER_SYMM_SIFT,1);
+ nNodesEnd = Cudd_DagSize( aFmin );
+ Cudd_RecursiveDeref( s_ddmin, aFmin );
+
+ printf( "Classical reordering of ADDs: Before = %d. After = %d.\n", nNodesBeg, nNodesEnd );
+ printf( "Classical variable reordering time = %.2f sec\n", (float)(clock() - clk1)/(float)(CLOCKS_PER_SEC) );
+ return nNodesEnd;
+}
+
+
+////////////////////////////////////////////////////////////////////////
+/// END OF FILE ///
+////////////////////////////////////////////////////////////////////////
+
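Extra_ReorderCudd() above builds its variable permutation from the invperm arrays of the two managers so that the variable at each level of the source manager maps to the variable at the same level of the temporary manager. A minimal standalone sketch of that construction, assuming made-up four-variable orders and making no CUDD calls:

#include <stdio.h>

int main( void )
{
    /* invperm[lev] = index of the variable sitting at level lev */
    int invpermSrc[4] = { 2, 0, 3, 1 };   /* order in the source manager      */
    int invpermDst[4] = { 0, 1, 2, 3 };   /* order in the destination manager */
    int Permute[4];
    int lev, var;

    /* map the variable at each source level to the variable at the same destination level */
    for ( lev = 0; lev < 4; lev++ )
        Permute[ invpermSrc[lev] ] = invpermDst[lev];

    for ( var = 0; var < 4; var++ )
        printf( "source var %d -> destination var %d\n", var, Permute[var] );
    return 0;
}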
diff --git a/src/bdd/reo/reoTransfer.c b/src/bdd/reo/reoTransfer.c
new file mode 100644
index 00000000..752cd3d7
--- /dev/null
+++ b/src/bdd/reo/reoTransfer.c
@@ -0,0 +1,199 @@
+/**CFile****************************************************************
+
+ FileName [reoTransfer.c]
+
+ PackageName [REO: A specialized DD reordering engine.]
+
+  Synopsis    [Transferring a DD from the CUDD manager into REO's internal data structures and back.]
+
+ Author [Alan Mishchenko]
+
+ Affiliation [UC Berkeley]
+
+ Date [Ver. 1.0. Started - October 15, 2002.]
+
+ Revision [$Id: reoTransfer.c,v 1.0 2002/15/10 03:00:00 alanmi Exp $]
+
+***********************************************************************/
+
+#include "reo.h"
+
+////////////////////////////////////////////////////////////////////////
+/// DECLARATIONS ///
+////////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////////
+/// FUNCTION DEFINITIONS ///
+////////////////////////////////////////////////////////////////////////
+
+/**Function*************************************************************
+
+ Synopsis [Transfers the DD into the internal reordering data structure.]
+
+ Description [It is important that the hash table is lossless.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+reo_unit * reoTransferNodesToUnits_rec( reo_man * p, DdNode * F )
+{
+ DdManager * dd = p->dd;
+ reo_unit * pUnit;
+ int HKey, fComp;
+
+ fComp = Cudd_IsComplement(F);
+ F = Cudd_Regular(F);
+
+ // check the hash-table
+ if ( F->ref != 1 )
+ {
+ // search cache - use linear probing
+ for ( HKey = hashKey2(p->Signature,F,p->nTableSize); p->HTable[HKey].Sign == p->Signature; HKey = (HKey+1) % p->nTableSize )
+ if ( p->HTable[HKey].Arg1 == (unsigned)F )
+ {
+ pUnit = (reo_unit*) p->HTable[HKey].Arg2;
+ assert( pUnit );
+ // increment the edge counter
+ pUnit->n++;
+ return Unit_NotCond( pUnit, fComp );
+ }
+ }
+	// the entry is not found in the cache
+
+ // create a new entry
+ pUnit = reoUnitsGetNextUnit( p );
+ pUnit->n = 1;
+ if ( cuddIsConstant(F) )
+ {
+ pUnit->lev = REO_CONST_LEVEL;
+ pUnit->pE = (reo_unit*)((int)(cuddV(F)));
+ pUnit->pT = NULL;
+		// check if the diagram being reordered is an ADD (has a constant node other than 1)
+ if ( F != dd->one )
+ p->fThisIsAdd = 1;
+ // insert the unit into the corresponding plane
+ reoUnitsAddUnitToPlane( &(p->pPlanes[p->nSupp]), pUnit ); // increments the unit counter
+ }
+ else
+ {
+ pUnit->lev = p->pMapToPlanes[F->index];
+ pUnit->pE = reoTransferNodesToUnits_rec( p, cuddE(F) );
+ pUnit->pT = reoTransferNodesToUnits_rec( p, cuddT(F) );
+ // insert the unit into the corresponding plane
+ reoUnitsAddUnitToPlane( &(p->pPlanes[pUnit->lev]), pUnit ); // increments the unit counter
+ }
+
+ // add to the hash table
+ if ( F->ref != 1 )
+ {
+ // the next free entry is already found - it is pointed to by HKey
+		// the next free entry was already found above - it is pointed to by HKey;
+		// however, while we traversed the diagram, the hash entry to which HKey points
+		// might have been used - make sure that its signature is different
+ p->HTable[HKey].Sign = p->Signature;
+ p->HTable[HKey].Arg1 = (unsigned)F;
+ p->HTable[HKey].Arg2 = (unsigned)pUnit;
+ }
+
+ // increment the counter of nodes
+ p->nNodesCur++;
+ return Unit_NotCond( pUnit, fComp );
+}
+
+/**Function*************************************************************
+
+ Synopsis [Creates the DD from the internal reordering data structure.]
+
+ Description [It is important that the hash table is lossless.]
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+DdNode * reoTransferUnitsToNodes_rec( reo_man * p, reo_unit * pUnit )
+{
+ DdManager * dd = p->dd;
+ DdNode * bRes, * E, * T;
+ int HKey, fComp;
+
+ fComp = Cudd_IsComplement(pUnit);
+ pUnit = Unit_Regular(pUnit);
+
+ // check the hash-table
+ if ( pUnit->n != 1 )
+ {
+ for ( HKey = hashKey2(p->Signature,pUnit,p->nTableSize); p->HTable[HKey].Sign == p->Signature; HKey = (HKey+1) % p->nTableSize )
+ if ( p->HTable[HKey].Arg1 == (unsigned)pUnit )
+ {
+ bRes = (DdNode*) p->HTable[HKey].Arg2;
+ assert( bRes );
+ return Cudd_NotCond( bRes, fComp );
+ }
+ }
+
+ // treat the case of constants
+ if ( Unit_IsConstant(pUnit) )
+ {
+ bRes = cuddUniqueConst( dd, ((double)((int)(pUnit->pE))) );
+ cuddRef( bRes );
+ }
+ else
+ {
+ // split and recur on children of this node
+ E = reoTransferUnitsToNodes_rec( p, pUnit->pE );
+ if ( E == NULL )
+ return NULL;
+ cuddRef(E);
+
+ T = reoTransferUnitsToNodes_rec( p, pUnit->pT );
+ if ( T == NULL )
+ {
+ Cudd_RecursiveDeref(dd, E);
+ return NULL;
+ }
+ cuddRef(T);
+
+ // consider the case when Res0 and Res1 are the same node
+ assert( E != T );
+ assert( !Cudd_IsComplement(T) );
+
+ bRes = cuddUniqueInter( dd, p->pMapToDdVarsFinal[pUnit->lev], T, E );
+ if ( bRes == NULL )
+ {
+ Cudd_RecursiveDeref(dd,E);
+ Cudd_RecursiveDeref(dd,T);
+ return NULL;
+ }
+ cuddRef( bRes );
+ cuddDeref( E );
+ cuddDeref( T );
+ }
+
+ // do not keep the result if the ref count is only 1, since it will not be visited again
+ if ( pUnit->n != 1 )
+ {
+		// while we traversed the diagram, the hash entry to which HKey points
+		// might have been used. Make sure that its signature is different.
+ for ( ; p->HTable[HKey].Sign == p->Signature; HKey = (HKey+1) % p->nTableSize );
+ p->HTable[HKey].Sign = p->Signature;
+ p->HTable[HKey].Arg1 = (unsigned)pUnit;
+ p->HTable[HKey].Arg2 = (unsigned)bRes;
+
+ // add the DD to the referenced DD list in order to be able to store it in cache
+ p->pRefNodes[p->nRefNodes++] = bRes; Cudd_Ref( bRes );
+ // no need to do this, because the garbage collection will not take bRes away
+ // it is held by the diagram in the making
+ }
+ // increment the counter of nodes
+ p->nNodesCur++;
+ cuddDeref( bRes );
+ return Cudd_NotCond( bRes, fComp );
+}
+
+////////////////////////////////////////////////////////////////////////
+/// END OF FILE ///
+////////////////////////////////////////////////////////////////////////
+
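Both the swap and the transfer code above look nodes up in a hash table with linear probing, where all entries are invalidated at once by bumping a signature instead of clearing the table. The toy table below sketches that idiom in isolation (the names, key type, and sizes are made up for illustration and are not the REO structures).

#include <stdio.h>

#define TABLE_SIZE 31

typedef struct { unsigned Sign, Key, Value; } toy_entry;

static toy_entry Table[TABLE_SIZE];   /* zero-initialized, never cleared again */
static unsigned  Signature = 0;

static unsigned toyHashKey( unsigned Key ) { return (Key * 2654435761u) % TABLE_SIZE; }

/* insert into the first slot whose signature differs from the current one */
static void toyInsert( unsigned Key, unsigned Value )
{
    unsigned h;
    for ( h = toyHashKey(Key); Table[h].Sign == Signature; h = (h + 1) % TABLE_SIZE );
    Table[h].Sign  = Signature;
    Table[h].Key   = Key;
    Table[h].Value = Value;
}

/* probe only while the signatures match; a stale entry ends the run */
static int toyLookup( unsigned Key, unsigned * pValue )
{
    unsigned h;
    for ( h = toyHashKey(Key); Table[h].Sign == Signature; h = (h + 1) % TABLE_SIZE )
        if ( Table[h].Key == Key )
        {
            *pValue = Table[h].Value;
            return 1;
        }
    return 0;
}

int main( void )
{
    unsigned Value = 0;
    Signature++;                      /* start a traversal; any old entries become stale */
    toyInsert( 100, 7 );
    toyInsert( 200, 8 );
    printf( "lookup(100): found = %d\n", toyLookup( 100, &Value ) );
    printf( "value = %u\n", Value );
    Signature++;                      /* next traversal: the table is effectively empty  */
    printf( "lookup(100): found = %d\n", toyLookup( 100, &Value ) );
    return 0;
}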
diff --git a/src/bdd/reo/reoUnits.c b/src/bdd/reo/reoUnits.c
new file mode 100644
index 00000000..aa86516e
--- /dev/null
+++ b/src/bdd/reo/reoUnits.c
@@ -0,0 +1,184 @@
+/**CFile****************************************************************
+
+ FileName [reoUnits.c]
+
+ PackageName [REO: A specialized DD reordering engine.]
+
+ Synopsis [Procedures which support internal data structures.]
+
+ Author [Alan Mishchenko]
+
+ Affiliation [UC Berkeley]
+
+ Date [Ver. 1.0. Started - October 15, 2002.]
+
+ Revision [$Id: reoUnits.c,v 1.0 2002/15/10 03:00:00 alanmi Exp $]
+
+***********************************************************************/
+
+#include "reo.h"
+
+////////////////////////////////////////////////////////////////////////
+/// DECLARATIONS ///
+////////////////////////////////////////////////////////////////////////
+
+static void reoUnitsAddToFreeUnitList( reo_man * p );
+
+////////////////////////////////////////////////////////////////////////
+/// FUNCTION DEFINITIONS ///
+////////////////////////////////////////////////////////////////////////
+
+/**Function*************************************************************
+
+ Synopsis [Extract the next unit from the free unit list.]
+
+ Description []
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+reo_unit * reoUnitsGetNextUnit(reo_man * p )
+{
+ reo_unit * pUnit;
+	// check that there are still units to extract
+ if ( p->pUnitFreeList == NULL )
+ reoUnitsAddToFreeUnitList( p );
+ // extract the next unit from the linked list
+ pUnit = p->pUnitFreeList;
+ p->pUnitFreeList = pUnit->Next;
+ p->nUnitsUsed++;
+ return pUnit;
+}
+
+/**Function*************************************************************
+
+ Synopsis [Returns the unit to the free unit list.]
+
+ Description []
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void reoUnitsRecycleUnit( reo_man * p, reo_unit * pUnit )
+{
+ pUnit->Next = p->pUnitFreeList;
+ p->pUnitFreeList = pUnit;
+ p->nUnitsUsed--;
+}
+
+/**Function*************************************************************
+
+ Synopsis [Returns the list of units to the free unit list.]
+
+ Description []
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void reoUnitsRecycleUnitList( reo_man * p, reo_plane * pPlane )
+{
+ reo_unit * pUnit;
+ reo_unit * pTail;
+
+ if ( pPlane->pHead == NULL )
+ return;
+
+ // find the tail
+ for ( pUnit = pPlane->pHead; pUnit; pUnit = pUnit->Next )
+ pTail = pUnit;
+ pTail->Next = p->pUnitFreeList;
+ p->pUnitFreeList = pPlane->pHead;
+ memset( pPlane, 0, sizeof(reo_plane) );
+}
+
+/**Function*************************************************************
+
+ Synopsis [Stops the unit dispenser.]
+
+ Description []
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void reoUnitsStopDispenser( reo_man * p )
+{
+ int i;
+ for ( i = 0; i < p->nMemChunks; i++ )
+ free( p->pMemChunks[i] );
+// printf("\nThe number of chunks used is %d, each of them %d units\n", p->nMemChunks, REO_CHUNK_SIZE );
+ p->nMemChunks = 0;
+}
+
+/**Function*************************************************************
+
+ Synopsis [Adds one unit to the list of units which constitutes the plane.]
+
+ Description []
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void reoUnitsAddUnitToPlane( reo_plane * pPlane, reo_unit * pUnit )
+{
+ if ( pPlane->pHead == NULL )
+ {
+ pPlane->pHead = pUnit;
+ pUnit->Next = NULL;
+ }
+ else
+ {
+ pUnit->Next = pPlane->pHead;
+ pPlane->pHead = pUnit;
+ }
+ pPlane->statsNodes++;
+}
+
+
+/**Function*************************************************************
+
+ Synopsis []
+
+ Description []
+
+ SideEffects []
+
+ SeeAlso []
+
+***********************************************************************/
+void reoUnitsAddToFreeUnitList( reo_man * p )
+{
+ int c;
+ // check that we still have chunks left
+ if ( p->nMemChunks == p->nMemChunksAlloc )
+ {
+ printf( "reoUnitsAddToFreeUnitList(): Memory manager ran out of memory!\n" );
+ fflush( stdout );
+ return;
+ }
+ // allocate the next chunk
+ assert( p->pUnitFreeList == NULL );
+ p->pUnitFreeList = ALLOC( reo_unit, REO_CHUNK_SIZE );
+	// split the chunk into a linked list of units
+ for ( c = 0; c < REO_CHUNK_SIZE-1; c++ )
+ (p->pUnitFreeList + c)->Next = p->pUnitFreeList + c + 1;
+ // set the last pointer to NULL
+ (p->pUnitFreeList + REO_CHUNK_SIZE-1)->Next = NULL;
+ // add the chunk to the array of chunks
+ p->pMemChunks[p->nMemChunks++] = p->pUnitFreeList;
+}
+
+
+////////////////////////////////////////////////////////////////////////
+/// END OF FILE ///
+////////////////////////////////////////////////////////////////////////
+
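reoUnitsGetNextUnit(), reoUnitsRecycleUnit(), and reoUnitsAddToFreeUnitList() above implement a chunked free-list allocator: units are carved out of REO_CHUNK_SIZE-sized chunks, threaded into a free list, handed out one at a time, and recycled by pushing them back. A self-contained sketch of the same idea follows (toy struct, toy sizes, and made-up names, not the REO code).

#include <stdio.h>
#include <stdlib.h>

#define CHUNK_SIZE 4                 /* tiny on purpose; REO uses REO_CHUNK_SIZE = 5000 */
#define MAX_CHUNKS 16

typedef struct toy_unit toy_unit;
struct toy_unit { int Data; toy_unit * Next; };

static toy_unit * pFreeList = NULL;
static toy_unit * pChunks[MAX_CHUNKS];
static int        nChunks  = 0;

/* carve a fresh chunk into units and thread them onto the free list */
static void toyAddChunk( void )
{
    int c;
    toy_unit * pChunk = (toy_unit *)malloc( sizeof(toy_unit) * CHUNK_SIZE );
    for ( c = 0; c < CHUNK_SIZE - 1; c++ )
        pChunk[c].Next = pChunk + c + 1;
    pChunk[CHUNK_SIZE - 1].Next = pFreeList;
    pFreeList = pChunk;
    pChunks[nChunks++] = pChunk;     /* remember the chunk so it can be freed at the end */
}

static toy_unit * toyGetUnit( void )
{
    toy_unit * pUnit;
    if ( pFreeList == NULL )
        toyAddChunk();
    pUnit = pFreeList;
    pFreeList = pUnit->Next;
    return pUnit;
}

static void toyRecycleUnit( toy_unit * pUnit )
{
    pUnit->Next = pFreeList;         /* push the unit back on the free list */
    pFreeList = pUnit;
}

int main( void )
{
    int i;
    toy_unit * pA = toyGetUnit();
    toy_unit * pB = toyGetUnit();
    toyRecycleUnit( pA );
    pA = toyGetUnit();               /* reuses the recycled unit - no new allocation */
    (void)pB;
    printf( "chunks allocated = %d\n", nChunks );
    for ( i = 0; i < nChunks; i++ )
        free( pChunks[i] );
    return 0;
}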