#ifndef __INC_LIB8TION_SCALE_H
#define __INC_LIB8TION_SCALE_H

///@ingroup lib8tion

///@defgroup Scaling Scaling functions
/// Fast, efficient 8-bit scaling functions specifically
/// designed for high-performance LED programming.
///
/// Because of the AVR(Arduino) and ARM assembly language
/// implementations provided, using these functions often
/// results in smaller and faster code than the equivalent
/// program using plain "C" arithmetic and logic.
///@{

/// scale one byte by a second one, which is treated as
/// the numerator of a fraction whose denominator is 256
/// In other words, it computes i * (scale / 256)
/// 4 clocks AVR with MUL, 2 clocks ARM
LIB8STATIC_ALWAYS_INLINE uint8_t scale8( uint8_t i, fract8 scale)
{
#if SCALE8_C == 1
    /* Plain C implementation */
#if (FASTLED_SCALE8_FIXED == 1)
    /* "fixed" variant scales by (scale+1)/256 so that scale8(x,255) == x */
    return (((uint16_t)i) * (1+(uint16_t)(scale))) >> 8;
#else
    return ((uint16_t)i * (uint16_t)(scale) ) >> 8;
#endif
#elif SCALE8_AVRASM == 1
#if defined(LIB8_ATTINY)
    /* ATtiny parts lack a hardware MUL: shift-and-add multiply loop */
#if (FASTLED_SCALE8_FIXED == 1)
    uint8_t work=i;
#else
    uint8_t work=0;
#endif
    uint8_t cnt=0x80;   /* loop counter: shifted right once per iteration */
    asm volatile(
#if (FASTLED_SCALE8_FIXED == 1)
        /* scale+1; if it wraps to 0 (scale was 255) the answer is i itself */
        " inc %[scale] \n\t"
        " breq DONE_%= \n\t"
        " clr %[work] \n\t"
#endif
        "LOOP_%=: \n\t"
        /*" sbrc %[scale], 0 \n\t"
        " add %[work], %[i] \n\t"
        " ror %[work] \n\t"
        " lsr %[scale] \n\t"
        " clc \n\t"*/
        " sbrc %[scale], 0 \n\t"
        " add %[work], %[i] \n\t"
        " ror %[work] \n\t"
        " lsr %[scale] \n\t"
        " lsr %[cnt] \n\t"
        "brcc LOOP_%= \n\t"
        "DONE_%=: \n\t"
        : [work] "+r" (work), [cnt] "+r" (cnt)
        : [scale] "r" (scale), [i] "r" (i)
        :
    );
    return work;
#else
    /* AVR with hardware MUL */
    asm volatile(
#if (FASTLED_SCALE8_FIXED==1)
        // Multiply 8-bit i * 8-bit scale, giving 16-bit r1,r0
        "mul %0, %1 \n\t"
        // Add i to r0, possibly setting the carry flag
        "add r0, %0 \n\t"
        // load the immediate 0 into i (note, this does _not_ touch any flags)
        "ldi %0, 0x00 \n\t"
        // walk and chew gum at the same time
        "adc %0, r1 \n\t"
#else
        /* Multiply 8-bit i * 8-bit scale, giving 16-bit r1,r0 */
        "mul %0, %1 \n\t"
        /* Move the high 8-bits of the product (r1) back to i */
        "mov %0, r1 \n\t"
        /* Restore r1 to "0"; it's expected to always be that */
#endif
        "clr __zero_reg__ \n\t"
        : "+a" (i)      /* writes to i */
        : "a" (scale)   /* uses scale */
        : "r0", "r1"    /* clobbers r0, r1 */
    );
    /* Return the result */
    return i;
#endif
#else
#error "No implementation for scale8 available."
#endif
}

/// The "video" version of scale8 guarantees that the output will
/// be only be zero if one or both of the inputs are zero.  If both
/// inputs are non-zero, the output is guaranteed to be non-zero.
/// This makes for better 'video'/LED dimming, at the cost of
/// several additional cycles.
LIB8STATIC_ALWAYS_INLINE uint8_t scale8_video( uint8_t i, fract8 scale)
{
#if SCALE8_C == 1 || defined(LIB8_ATTINY)
    /* add one to the truncated product whenever both inputs are non-zero */
    uint8_t j = (((int)i * (int)scale) >> 8) + ((i&&scale)?1:0);
    // uint8_t nonzeroscale = (scale != 0) ? 1 : 0;
    // uint8_t j = (i == 0) ? 0 : (((int)i * (int)(scale) ) >> 8) + nonzeroscale;
    return j;
#elif SCALE8_AVRASM == 1
    uint8_t j=0;
    asm volatile(
        " tst %[i]\n\t"
        " breq L_%=\n\t"
        " mul %[i], %[scale]\n\t"
        " mov %[j], r1\n\t"
        " clr __zero_reg__\n\t"
        " cpse %[scale], r1\n\t"
        " subi %[j], 0xFF\n\t"
        "L_%=: \n\t"
        : [j] "+a" (j)
        : [i] "a" (i), [scale] "a" (scale)
        : "r0", "r1");
    return j;
    // uint8_t nonzeroscale = (scale != 0) ? 1 : 0;
    // asm volatile(
    // " tst %0 \n"
    // " breq L_%= \n"
    // " mul %0, %1 \n"
    // " mov %0, r1 \n"
    // " add %0, %2 \n"
    // " clr __zero_reg__ \n"
    // "L_%=: \n"
    // : "+a" (i)
    // : "a" (scale), "a" (nonzeroscale)
    // : "r0", "r1");
    // // Return the result
    // return i;
#else
#error "No implementation for scale8_video available."
#endif
}

/// This version of scale8 does not clean up the R1 register on AVR
/// If you are doing several 'scale8's in a row, use this, and
/// then explicitly call cleanup_R1.
LIB8STATIC_ALWAYS_INLINE uint8_t scale8_LEAVING_R1_DIRTY( uint8_t i, fract8 scale)
{
#if SCALE8_C == 1
#if (FASTLED_SCALE8_FIXED == 1)
    return (((uint16_t)i) * ((uint16_t)(scale)+1)) >> 8;
#else
    return ((int)i * (int)(scale) ) >> 8;
#endif
#elif SCALE8_AVRASM == 1
    asm volatile(
#if (FASTLED_SCALE8_FIXED==1)
        // Multiply 8-bit i * 8-bit scale, giving 16-bit r1,r0
        "mul %0, %1 \n\t"
        // Add i to r0, possibly setting the carry flag
        "add r0, %0 \n\t"
        // load the immediate 0 into i (note, this does _not_ touch any flags)
        "ldi %0, 0x00 \n\t"
        // walk and chew gum at the same time
        "adc %0, r1 \n\t"
#else
        /* Multiply 8-bit i * 8-bit scale, giving 16-bit r1,r0 */
        "mul %0, %1 \n\t"
        /* Move the high 8-bits of the product (r1) back to i */
        "mov %0, r1 \n\t"
#endif
        /* R1 IS LEFT DIRTY HERE; YOU MUST ZERO IT OUT YOURSELF */
        /* "clr __zero_reg__ \n\t" */
        : "+a" (i)      /* writes to i */
        : "a" (scale)   /* uses scale */
        : "r0", "r1"    /* clobbers r0, r1 */
    );
    // Return the result
    return i;
#else
#error "No implementation for scale8_LEAVING_R1_DIRTY available."
#endif
}

/// This version of scale8_video does not clean up the R1 register on AVR
/// If you are doing several 'scale8_video's in a row, use this, and
/// then explicitly call cleanup_R1.
LIB8STATIC_ALWAYS_INLINE uint8_t scale8_video_LEAVING_R1_DIRTY( uint8_t i, fract8 scale)
{
#if SCALE8_C == 1 || defined(LIB8_ATTINY)
    /* add one to the truncated product whenever both inputs are non-zero */
    uint8_t j = (((int)i * (int)scale) >> 8) + ((i&&scale)?1:0);
    // uint8_t nonzeroscale = (scale != 0) ? 1 : 0;
    // uint8_t j = (i == 0) ? 0 : (((int)i * (int)(scale) ) >> 8) + nonzeroscale;
    return j;
#elif SCALE8_AVRASM == 1
    uint8_t j=0;
    asm volatile(
        " tst %[i]\n\t"
        " breq L_%=\n\t"
        " mul %[i], %[scale]\n\t"
        " mov %[j], r1\n\t"
        " breq L_%=\n\t"
        " subi %[j], 0xFF\n\t"
        "L_%=: \n\t"
        : [j] "+a" (j)
        : [i] "a" (i), [scale] "a" (scale)
        : "r0", "r1");
    return j;
    // uint8_t nonzeroscale = (scale != 0) ? 1 : 0;
    // asm volatile(
    // " tst %0 \n"
    // " breq L_%= \n"
    // " mul %0, %1 \n"
    // " mov %0, r1 \n"
    // " add %0, %2 \n"
    // " clr __zero_reg__ \n"
    // "L_%=: \n"
    // : "+a" (i)
    // : "a" (scale), "a" (nonzeroscale)
    // : "r0", "r1");
    // // Return the result
    // return i;
#else
#error "No implementation for scale8_video_LEAVING_R1_DIRTY available."
#endif
}

/// Clean up the r1 register after a series of *LEAVING_R1_DIRTY calls
LIB8STATIC_ALWAYS_INLINE void cleanup_R1(void)
{
#if CLEANUP_R1_AVRASM == 1
    // Restore r1 to "0"; it's expected to always be that
    asm volatile( "clr __zero_reg__ \n\t" : : : "r1" );
#endif
}

/// scale a 16-bit unsigned value by an 8-bit value,
/// considered as numerator of a fraction whose denominator
/// is 256. In other words, it computes i * (scale / 256)
LIB8STATIC_ALWAYS_INLINE uint16_t scale16by8( uint16_t i, fract8 scale )
{
#if SCALE16BY8_C == 1
    uint16_t result;
#if FASTLED_SCALE8_FIXED == 1
    /* "fixed" variant scales by (scale+1)/256 */
    result = (i * (1+((uint16_t)scale))) >> 8;
#else
    result = (i * scale) / 256;
#endif
    return result;
#elif SCALE16BY8_AVRASM == 1
#if FASTLED_SCALE8_FIXED == 1
    uint16_t result = 0;
    asm volatile(
        // result.A = HighByte( (i.A x scale) + i.A )
        " mul %A[i], %[scale] \n\t"
        " add r0, %A[i] \n\t"
        // " adc r1, [zero] \n\t"
        // " mov %A[result], r1 \n\t"
        " adc %A[result], r1 \n\t"

        // result.A-B += i.B x scale
        " mul %B[i], %[scale] \n\t"
        " add %A[result], r0 \n\t"
        " adc %B[result], r1 \n\t"

        // cleanup r1
        " clr __zero_reg__ \n\t"

        // result.A-B += i.B
        " add %A[result], %B[i] \n\t"
        " adc %B[result], __zero_reg__ \n\t"

        : [result] "+r" (result)
        : [i] "r" (i), [scale] "r" (scale)
        : "r0", "r1"
    );
    return result;
#else
    uint16_t result = 0;
    asm volatile(
        // result.A = HighByte(i.A x j )
        " mul %A[i], %[scale] \n\t"
        " mov %A[result], r1 \n\t"
        //" clr %B[result] \n\t"

        // result.A-B += i.B x j
        " mul %B[i], %[scale] \n\t"
        " add %A[result], r0 \n\t"
        " adc %B[result], r1 \n\t"

        // cleanup r1
        " clr __zero_reg__ \n\t"

        : [result] "+r" (result)
        : [i] "r" (i), [scale] "r" (scale)
        : "r0", "r1"
    );
    return result;
#endif
#else
#error "No implementation for scale16by8 available."
#endif
}

/// scale a 16-bit unsigned value by a 16-bit value,
/// considered as numerator of a fraction whose denominator
/// is 65536. In other words, it computes i * (scale / 65536)
LIB8STATIC uint16_t scale16( uint16_t i, fract16 scale )
{
#if SCALE16_C == 1
    uint16_t result;
#if FASTLED_SCALE8_FIXED == 1
    result = ((uint32_t)(i) * (1+(uint32_t)(scale))) / 65536;
#else
    result = ((uint32_t)(i) * (uint32_t)(scale)) / 65536;
#endif
    return result;
#elif SCALE16_AVRASM == 1
#if FASTLED_SCALE8_FIXED == 1
    // implemented sort of like
    //   result = ((i * scale) + i ) / 65536
    //
    // why not like this, you may ask?
    //   result = (i * (scale+1)) / 65536
    // the answer is that if scale is 65535, then scale+1
    // will be zero, which is not what we want.
    uint32_t result;
    asm volatile(
        // result.A-B = i.A x scale.A
        " mul %A[i], %A[scale] \n\t"
        // save results...
        // basic idea:
        //" mov %A[result], r0 \n\t"
        //" mov %B[result], r1 \n\t"
        // which can be written as...
        " movw %A[result], r0 \n\t"
        // Because we're going to add i.A-B to
        // result.A-D, we DO need to keep both
        // the r0 and r1 portions of the product
        // UNlike in the 'unfixed scale8' version.
        // So the movw here is needed.
        : [result] "=r" (result)
        : [i] "r" (i), [scale] "r" (scale)
        : "r0", "r1"
    );
    asm volatile(
        // result.C-D = i.B x scale.B
        " mul %B[i], %B[scale] \n\t"
        //" mov %C[result], r0 \n\t"
        //" mov %D[result], r1 \n\t"
        " movw %C[result], r0 \n\t"
        : [result] "+r" (result)
        : [i] "r" (i), [scale] "r" (scale)
        : "r0", "r1"
    );
    const uint8_t zero = 0;
    asm volatile(
        // result.B-D += i.B x scale.A
        " mul %B[i], %A[scale] \n\t"
        " add %B[result], r0 \n\t"
        " adc %C[result], r1 \n\t"
        " adc %D[result], %[zero] \n\t"

        // result.B-D += i.A x scale.B
        " mul %A[i], %B[scale] \n\t"
        " add %B[result], r0 \n\t"
        " adc %C[result], r1 \n\t"
        " adc %D[result], %[zero] \n\t"

        // cleanup r1
        " clr r1 \n\t"
        : [result] "+r" (result)
        : [i] "r" (i), [scale] "r" (scale), [zero] "r" (zero)
        : "r0", "r1"
    );
    asm volatile(
        // result.A-D += i.A-B
        " add %A[result], %A[i] \n\t"
        " adc %B[result], %B[i] \n\t"
        " adc %C[result], %[zero] \n\t"
        " adc %D[result], %[zero] \n\t"
        : [result] "+r" (result)
        : [i] "r" (i), [zero] "r" (zero)
    );
    /* the answer lives in the top 16 bits of the 32-bit accumulator */
    result = result >> 16;
    return result;
#else
    uint32_t result;
    asm volatile(
        // result.A-B = i.A x scale.A
        " mul %A[i], %A[scale] \n\t"
        // save results...
        // basic idea:
        //" mov %A[result], r0 \n\t"
        //" mov %B[result], r1 \n\t"
        // which can be written as...
        " movw %A[result], r0 \n\t"
        // We actually don't need to do anything with r0,
        // as result.A is never used again here, so we
        // could just move the high byte, but movw is
        // one clock cycle, just like mov, so might as
        // well, in case we want to use this code for
        // a generic 16x16 multiply somewhere.
        : [result] "=r" (result)
        : [i] "r" (i), [scale] "r" (scale)
        : "r0", "r1"
    );
    asm volatile(
        // result.C-D = i.B x scale.B
        " mul %B[i], %B[scale] \n\t"
        //" mov %C[result], r0 \n\t"
        //" mov %D[result], r1 \n\t"
        " movw %C[result], r0 \n\t"
        : [result] "+r" (result)
        : [i] "r" (i), [scale] "r" (scale)
        : "r0", "r1"
    );
    const uint8_t zero = 0;
    asm volatile(
        // result.B-D += i.B x scale.A
        " mul %B[i], %A[scale] \n\t"
        " add %B[result], r0 \n\t"
        " adc %C[result], r1 \n\t"
        " adc %D[result], %[zero] \n\t"

        // result.B-D += i.A x scale.B
        " mul %A[i], %B[scale] \n\t"
        " add %B[result], r0 \n\t"
        " adc %C[result], r1 \n\t"
        " adc %D[result], %[zero] \n\t"

        // cleanup r1
        " clr r1 \n\t"
        : [result] "+r" (result)
        : [i] "r" (i), [scale] "r" (scale), [zero] "r" (zero)
        : "r0", "r1"
    );
    /* the answer lives in the top 16 bits of the 32-bit accumulator */
    result = result >> 16;
    return result;
#endif
#else
#error "No implementation for scale16 available."
#endif
}

///@}

///@defgroup Dimming Dimming and brightening functions
///
/// Dimming and brightening functions
///
/// The eye does not respond in a linear way to light.
/// High speed PWM'd LEDs at 50% duty cycle appear far
/// brighter than the 'half as bright' you might expect.
///
/// If you want your midpoint brightness level (128) to
/// appear half as bright as 'full' brightness (255), you
/// have to apply a 'dimming function'.
///@{ /// Adjust a scaling value for dimming LIB8STATIC uint8_t dim8_raw( uint8_t x) { return scale8( x, x); } /// Adjust a scaling value for dimming for video (value will never go below 1) LIB8STATIC uint8_t dim8_video( uint8_t x) { return scale8_video( x, x); } /// Linear version of the dimming function that halves for values < 128 LIB8STATIC uint8_t dim8_lin( uint8_t x ) { if( x & 0x80 ) { x = scale8( x, x); } else { x += 1; x /= 2; } return x; } /// inverse of the dimming function, brighten a value LIB8STATIC uint8_t brighten8_raw( uint8_t x) { uint8_t ix = 255 - x; return 255 - scale8( ix, ix); } /// inverse of the dimming function, brighten a value LIB8STATIC uint8_t brighten8_video( uint8_t x) { uint8_t ix = 255 - x; return 255 - scale8_video( ix, ix); } /// inverse of the dimming function, brighten a value LIB8STATIC uint8_t brighten8_lin( uint8_t x ) { uint8_t ix = 255 - x; if( ix & 0x80 ) { ix = scale8( ix, ix); } else { ix += 1; ix /= 2; } return 255 - ix; } ///@} #endif ' href='#n434'>434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450
/*
 * Revision Control Information
 *
 * $Source$
 * $Author$
 * $Revision$
 * $Date$
 *
 */
/*
    contain.c -- set containment routines

    These are complex routines for performing containment over a
    family of sets, but they have the advantage of being much faster
    than a straightforward n*n routine.

    First the cubes are sorted by size, and as a secondary key they are
    sorted so that if two cubes are equal they end up adjacent.  We can
    then quickly remove equal cubes from further consideration by
    comparing each cube to its neighbor.  Finally, because the cubes
    are sorted by size, we need only check cubes which are larger (or
    smaller) than a given cube for containment.
*/

#include "espresso.h"

ABC_NAMESPACE_IMPL_START



/*
    sf_contain -- perform containment on a set family (delete sets which
    are contained by some larger set in the family).  No assumptions are
    made about A, and the result will be returned in decreasing order of
    set size.
*/
pset_family sf_contain(A)
INOUT pset_family A;            /* disposes of A */
{
    pset *sorted;               /* pointer array over the sets of A */
    int survivors;              /* how many sets remain after filtering */
    pset_family result;

    sorted = sf_sort(A, descend);               /* biggest sets first */
    survivors = rm_equal(sorted, descend);      /* drop duplicate sets */
    survivors = rm_contain(sorted);             /* drop contained sets */
    result = sf_unlist(sorted, survivors, A->sf_size);  /* rebuild family */
    sf_free(A);
    return result;
}


/*
    sf_rev_contain -- perform containment on a set family (delete sets which
    contain some smaller set in the family).  No assumptions are made about
    A, and the result will be returned in increasing order of set size.
*/
pset_family sf_rev_contain(A)
INOUT pset_family A;            /* disposes of A */
{
    pset *sorted;               /* pointer array over the sets of A */
    int survivors;              /* how many sets remain after filtering */
    pset_family result;

    sorted = sf_sort(A, ascend);                /* smallest sets first */
    survivors = rm_equal(sorted, ascend);       /* drop duplicate sets */
    survivors = rm_rev_contain(sorted);         /* drop containing sets */
    result = sf_unlist(sorted, survivors, A->sf_size);  /* rebuild family */
    sf_free(A);
    return result;
}


/*
    sf_ind_contain -- perform containment on a set family (delete sets which
    are contained by some larger set in the family).  No assumptions are
    made about A, and the result will be returned in decreasing order of
    set size.  Also maintains a set of row_indices to track which rows
    disappear and how the rows end up permuted.
*/
pset_family sf_ind_contain(A, row_indices)
INOUT pset_family A;            /* disposes of A */
INOUT int *row_indices;         /* updated with the new values */
{
    pset *sorted;               /* pointer array over the sets of A */
    int survivors;              /* how many sets remain after filtering */
    pset_family result;

    sorted = sf_sort(A, descend);               /* biggest sets first */
    survivors = rm_equal(sorted, descend);      /* drop duplicate sets */
    survivors = rm_contain(sorted);             /* drop contained sets */
    result = sf_ind_unlist(sorted, survivors, A->sf_size, row_indices, A->data);
    sf_free(A);
    return result;
}


/* sf_dupl -- delete duplicate sets in a set family */
pset_family sf_dupl(A)
INOUT pset_family A;            /* disposes of A */
{
    pset *sorted;               /* pointer array over the sets of A */
    int survivors;              /* sets remaining after deduplication */
    pset_family result;

    sorted = sf_sort(A, descend);           /* equal sets become adjacent */
    survivors = rm_equal(sorted, descend);  /* keep one copy of each */
    result = sf_unlist(sorted, survivors, A->sf_size);  /* rebuild family */
    sf_free(A);
    return result;
}


/*
    sf_union -- form the contained union of two set families (delete
    sets which are contained by some larger set in the family).  A and
    B are assumed already sorted in decreasing order of set size (and
    the SIZE field is assumed to contain the set size), and the result
    will be returned sorted likewise.
*/
pset_family sf_union(A, B)
INOUT pset_family A, B;         /* disposes of A and B */
{
    pset *Alist, *Blist, *Elist;
    int total;
    pset_family result;

    Alist = sf_list(A);
    Blist = sf_list(B);
    /* room for the sets common to both families, plus a NULL sentinel */
    Elist = ALLOC(pset, MAX(A->count, B->count) + 1);

    total = rm2_equal(Alist, Blist, Elist, descend);        /* common sets */
    total += rm2_contain(Alist, Blist) + rm2_contain(Blist, Alist);

    result = sf_merge(Alist, Blist, Elist, total, A->sf_size);
    sf_free(A);
    sf_free(B);
    return result;
}


/*
    dist_merge -- consider all sets to be "or"-ed with "mask" and then
    delete duplicates from the set family.
*/
pset_family dist_merge(A, mask)
INOUT pset_family A;            /* disposes of A */
IN pset mask;                   /* defines variables to mask out */
{
    pset *sorted;
    int survivors;
    pset_family result;

    /* d1_order is expected to compare sets modulo cube.temp[0] */
    (void) set_copy(cube.temp[0], mask);
    sorted = sf_sort(A, d1_order);
    survivors = d1_rm_equal(sorted, d1_order);  /* or-merge equal-under-mask sets */
    result = sf_unlist(sorted, survivors, A->sf_size);
    sf_free(A);
    return result;
}


/*
    d1merge -- perform an efficient distance-1 merge of cubes of A,
    merging cubes which agree everywhere outside of variable `var`.
*/
pset_family d1merge(A, var)
INOUT pset_family A;            /* disposes of A */
IN int var;
{
    /* mask out the chosen variable and merge the sets equal elsewhere */
    return dist_merge(A, cube.var_mask[var]);
}



/* d1_rm_equal -- distance-1 merge (merge cubes which are equal under a mask) */
/*
    A1 must be a NULL-terminated array of set pointers, sorted so that
    sets which compare equal (under `compare`, e.g. d1_order) are
    adjacent.  Each run of equal sets is OR-ed together into its first
    member; survivors are compacted to the front of A1, a NULL sentinel
    is stored after them, and their count is returned.
*/
int d1_rm_equal(A1, compare)
register pset *A1;                /* array of set pointers */
int (*compare)();                /* comparison function */
{
    register int i, j, dest;

    dest = 0;
    if (A1[0] != (pcube) NULL) {
    /* i names the start of the current run of equal sets; j scans ahead */
    for(i = 0, j = 1; A1[j] != (pcube) NULL; j++)
        if ( (*compare)(&A1[i], &A1[j]) == 0) {
        /* if sets are equal (under the mask) merge them */
        (void) set_or(A1[i], A1[i], A1[j]);
        } else {
        /* sets are unequal, so save the set i */
        A1[dest++] = A1[i];
        i = j;
        }
    /* the final run always yields one surviving set */
    A1[dest++] = A1[i];
    }
    A1[dest] = (pcube) NULL;    /* sentinel */
    return dest;
}


/* rm_equal -- scan a sorted array of set pointers for duplicate sets */
/*
    A1 must be NULL-terminated and sorted so equal sets are adjacent.
    One representative of each run of equal sets survives; survivors are
    compacted to the front, followed by a NULL sentinel.  Returns the
    number of survivors.
*/
int rm_equal(A1, compare)
INOUT pset *A1;                 /* updated in place */
IN int (*compare)();
{
    register pset *scan, *out = A1;

    if (*A1 != NULL) {                      /* at least one set present */
        /* keep a set only when the entry after it compares different */
        for (scan = A1 + 1; *scan != NULL; scan++) {
            if ((*compare)(scan, scan - 1) != 0) {
                *out++ = *(scan - 1);
            }
        }
        *out++ = *(scan - 1);               /* the last set always survives */
        *out = NULL;                        /* sentinel */
    }
    return out - A1;
}


/* rm_contain -- perform containment over a sorted array of set pointers */
/*
    A1 must be NULL-terminated and sorted in DECREASING order of set
    size, with each set's SIZE field already computed (see sf_sort).
    Sets contained in (implied by) a surviving strictly-larger set are
    deleted; survivors are compacted to the front of A1 followed by a
    NULL sentinel, and their count is returned.
*/
int rm_contain(A1)
INOUT pset *A1;                 /* updated in place */
{
    register pset *pa, *pb;
    register pset *pcheck = NULL; // Suppress "might be used uninitialized"
    register pset a, b;
    pset *pdest = A1;           /* next free slot for surviving sets */
    int last_size = -1;

    /* Loop for all cubes of A1 */
    for(pa = A1; (a = *pa++) != NULL; ) {
    /* Update the check pointer if the size has changed */
    /* pcheck marks the end of the survivors strictly larger than a;
       only those can possibly contain a, so only they are checked */
    if (SIZE(a) != last_size)
        last_size = SIZE(a), pcheck = pdest;
    for(pb = A1; pb != pcheck; ) {
        b = *pb++;
        /* macro executes `continue` when a is NOT contained in b;
           falling through to the goto means b contains a */
        INLINEsetp_implies(a, b, /* when_false => */ continue);
        goto lnext1;
    }
    /* set a was not contained by some larger set, so save it */
    *pdest++ = a;
    lnext1: ;
    }

    *pdest = NULL;              /* sentinel */
    return pdest - A1;
}


/* rm_rev_contain -- perform reverse containment over a sorted array of set pointers */
/*
    A1 must be NULL-terminated and sorted in INCREASING order of set
    size, with each set's SIZE field already computed (see sf_sort).
    Sets that contain a surviving strictly-smaller set are deleted;
    survivors are compacted to the front of A1 followed by a NULL
    sentinel, and their count is returned.
*/
int rm_rev_contain(A1)
INOUT pset *A1;                 /* updated in place */
{
    register pset *pa, *pb;
    register pset *pcheck = NULL; // Suppress "might be used uninitialized"
    register pset a, b;
    pset *pdest = A1;           /* next free slot for surviving sets */
    int last_size = -1;

    /* Loop for all cubes of A1 */
    for(pa = A1; (a = *pa++) != NULL; ) {
    /* Update the check pointer if the size has changed */
    /* pcheck marks the end of the survivors strictly smaller than a;
       only those can possibly be contained by a */
    if (SIZE(a) != last_size)
        last_size = SIZE(a), pcheck = pdest;
    for(pb = A1; pb != pcheck; ) {
        b = *pb++;
        /* macro executes `continue` when b is NOT contained in a;
           falling through to the goto means a contains b */
        INLINEsetp_implies(b, a, /* when_false => */ continue);
        goto lnext1;
    }
    /* the set a did not contain some smaller set, so save it */
    *pdest++ = a;
    lnext1: ;
    }

    *pdest = NULL;              /* sentinel */
    return pdest - A1;
}


/* rm2_equal -- check two sorted arrays of set pointers for equal cubes */
/*
    A1 and B1 must be NULL-terminated and sorted by `compare`, which is
    expected to return exactly -1, 0, or +1 (any other value would fall
    through the switch and advance neither pointer).  Cubes appearing in
    both arrays are moved to E1 (and dropped from A1 and B1); all three
    arrays are compacted and NULL-terminated.  Returns the number of
    common cubes placed in E1.
*/
int rm2_equal(A1, B1, E1, compare)
INOUT register pset *A1, *B1;           /* updated in place */
OUT pset *E1;
IN int (*compare)();
{
    register pset *pda = A1, *pdb = B1, *pde = E1;

    /* Walk through the arrays advancing pointer to larger cube */
    for(; *A1 != NULL && *B1 != NULL; )
    switch((*compare)(A1, B1)) {
        case -1:    /* "a" comes before "b" */
        *pda++ = *A1++; break;
        case 0:     /* equal cubes */
        *pde++ = *A1++; B1++; break;
        case 1:     /* "a" is to follow "b" */
        *pdb++ = *B1++; break;
    }

    /* Finish moving down the pointers of A and B */
    while (*A1 != NULL)
    *pda++ = *A1++;
    while (*B1 != NULL)
    *pdb++ = *B1++;
    *pda = *pdb = *pde = NULL;          /* sentinels for all three arrays */

    return pde - E1;
}


/* rm2_contain -- perform containment between two arrays of set pointers */
/*
    Deletes from A1 every set contained in some strictly larger set of
    B1.  Both arrays must be NULL-terminated, sorted in decreasing order
    of set size with SIZE fields computed.  A1 is compacted and
    NULL-terminated; returns the number of surviving sets in A1.
*/
int rm2_contain(A1, B1)
INOUT pset *A1;                 /* updated in place */
IN pset *B1;                    /* unchanged */
{
    register pset *pa, *pb, a, b, *pdest = A1;

    /* for each set in the first array ... */
    for(pa = A1; (a = *pa++) != NULL; ) {
    /* for each set in the second array which is larger ... */
    for(pb = B1; (b = *pb++) != NULL && SIZE(b) > SIZE(a); ) {
        /* macro executes `continue` when a is NOT contained in b */
        INLINEsetp_implies(a, b, /* when_false => */ continue);
        /* set was contained in some set of B, so don't save pointer */
        goto lnext1;
    }
    /* set wasn't contained in any set of B, so save the pointer */
    *pdest++ = a;
    lnext1: ;
    }

    *pdest = NULL;                      /* sentinel */
    return pdest - A1;                  /* # elements in A1 */
}



/* sf_sort -- sort the sets of A */
/*
    Returns a freshly allocated, NULL-terminated array of pointers to
    the sets of A, ordered by `compare`.  As a side effect, the SIZE
    field of every set is filled in with its cardinality (set_ord),
    which the comparison functions and the rm_* routines rely on.
*/
pset *sf_sort(A, compare)
IN pset_family A;
IN int (*compare)();
{
    register pset p, last, *slot, *list;

    /* one pointer per cube, plus room for the trailing sentinel */
    slot = list = ALLOC(pset, A->count + 1);
    foreach_set(A, last, p) {
        PUTSIZE(p, set_ord(p));         /* record the set size */
        *slot++ = p;
    }
    *slot = NULL;                       /* sentinel -- never seen by qsort */

    /* Sort cubes by size */
    qsort((char *) list, A->count, sizeof(pset), compare);
    return list;
}


/* sf_list -- make a list of pointers to the sets in a set family */
pset *sf_list(A)
IN register pset_family A;
{
    register pset p, last, *pdest, *A1;

    /* Create a single array pointing to each cube of A */
    pdest = A1 = ALLOC(pset, A->count + 1);
    foreach_set(A, last, p)
    *pdest++ = p;                   /* save the pointer */
    *pdest = NULL;                      /* Sentinel */
    return A1;
}


/* sf_unlist -- make a set family out of a list of pointers to sets */
/*
    Copies the sets named by the NULL-terminated pointer array A1 into a
    new family of `totcnt` sets of size `size`, then frees the pointer
    array (but not the sets it pointed into).
*/
pset_family sf_unlist(A1, totcnt, size)
IN pset *A1;
IN int totcnt, size;
{
    register pset src, dest, *cursor;
    pset_family R = sf_new(totcnt, size);

    R->count = totcnt;
    dest = R->data;
    for (cursor = A1; (src = *cursor++) != NULL; dest += R->wsize) {
        INLINEset_copy(dest, src);
    }
    FREE(A1);
    return R;
}


/* sf_ind_unlist -- make a set family out of a list of pointers to sets */
/*
    Like sf_unlist, but also permutes row_indices to match: after the
    call, row_indices[i] holds the value that belonged to the original
    row of the i-th surviving set.  pfirst must be the data pointer of
    the family the sets in A1 came from; (p - pfirst)/R->wsize then
    recovers a set's original row number (valid because R is built with
    the same set size, hence the same wsize, as the source family).
*/
pset_family sf_ind_unlist(A1, totcnt, size, row_indices, pfirst)
IN pset *A1;
IN int totcnt, size;
INOUT int *row_indices;
IN register pset pfirst;
{
    register pset pr, p, *pa;
    register int i, *new_row_indices;
    pset_family R = sf_new(totcnt, size);

    R->count = totcnt;
    new_row_indices = ALLOC(int, totcnt);
    for(pr = R->data, pa = A1, i=0; (p = *pa++) != NULL; pr += R->wsize, i++) {
    INLINEset_copy(pr, p);
    /* map the copied set back to its original row's index value */
    new_row_indices[i] = row_indices[(p - pfirst)/R->wsize];
    }
    /* overwrite the caller's array with the permuted indices */
    for(i = 0; i < totcnt; i++)
    row_indices[i] = new_row_indices[i];
    FREE(new_row_indices);
    FREE(A1);
    return R;
}


/* sf_merge -- merge three sorted lists of set pointers */
/*
    A1, B1 and E1 are NULL-terminated arrays of set pointers, each
    already sorted (decreasing size, per desc1); totcnt is the total
    number of sets across the three arrays.  The sets are copied into a
    new family in merged order, and all three pointer arrays are freed.
    NOTE(review): the merge loop compares through the NULL sentinels
    (e.g. desc1(*pmin, *pmax) once an array is exhausted), so desc1 is
    presumed to treat a NULL set as "sorts last" -- verify against its
    definition.
*/
pset_family sf_merge(A1, B1, E1, totcnt, size)
INOUT pset *A1, *B1, *E1;               /* will be disposed of */
IN int totcnt, size;
{
    register pset pr, ps, *pmin, *pmid, *pmax;
    pset_family R;
    pset *temp[3], *swap;
    int i, j, n;

    /* Allocate the result set_family */
    R = sf_new(totcnt, size);
    R->count = totcnt;
    pr = R->data;

    /* Quick bubble sort to order the top member of the three arrays */
    n = 3;  temp[0] = A1;  temp[1] = B1;  temp[2] = E1;
    for(i = 0; i < n-1; i++)
    for(j = i+1; j < n; j++)
        if (desc1(*temp[i], *temp[j]) > 0) {
        swap = temp[j];
        temp[j] = temp[i];
        temp[i] = swap;
        }
    pmin = temp[0];  pmid = temp[1];  pmax = temp[2];

    /* Save the minimum element, then update pmin, pmid, pmax */
    /* Invariant: *pmin is the next set in merged order; after consuming
       it, re-establish the ordering of the three array heads */
    while (*pmin != (pset) NULL) {
    ps = *pmin++;
    INLINEset_copy(pr, ps);
    pr += R->wsize;
    if (desc1(*pmin, *pmax) > 0) {
        swap = pmax; pmax = pmin; pmin = pmid; pmid = swap;
    } else if (desc1(*pmin, *pmid) > 0) {
        swap = pmin; pmin = pmid; pmid = swap;
    }
    }

    FREE(A1);
    FREE(B1);
    FREE(E1);
    return R;
}
ABC_NAMESPACE_IMPL_END