tristate
select ASYNC_CORE
select ASYNC_PQ
+ select ASYNC_XOR
+
+ config ASYNC_RAID6_TEST
+ tristate "Self test for hardware accelerated raid6 recovery"
+ depends on ASYNC_RAID6_RECOV
+ select ASYNC_MEMCPY
+ ---help---
+ This is a one-shot self test that permutes through the
+ recovery of all the possible two disk failure scenarios for an
+ N-disk array. Recovery is performed with the asynchronous
+ raid6 recovery routines, and will optionally use an offload
+ engine if one is available.
+
+ If unsure, say N.
+config ASYNC_TX_DISABLE_PQ_VAL_DMA
+ bool
+
+config ASYNC_TX_DISABLE_XOR_VAL_DMA
+ bool
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
endif
-
- targets += raid6int1.c
- $(obj)/raid6int1.c: UNROLL := 1
- $(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
- targets += raid6int2.c
- $(obj)/raid6int2.c: UNROLL := 2
- $(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
- targets += raid6int4.c
- $(obj)/raid6int4.c: UNROLL := 4
- $(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
- targets += raid6int8.c
- $(obj)/raid6int8.c: UNROLL := 8
- $(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
- targets += raid6int16.c
- $(obj)/raid6int16.c: UNROLL := 16
- $(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
- targets += raid6int32.c
- $(obj)/raid6int32.c: UNROLL := 32
- $(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
- CFLAGS_raid6altivec1.o += $(altivec_flags)
- targets += raid6altivec1.c
- $(obj)/raid6altivec1.c: UNROLL := 1
- $(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
- CFLAGS_raid6altivec2.o += $(altivec_flags)
- targets += raid6altivec2.c
- $(obj)/raid6altivec2.c: UNROLL := 2
- $(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
- CFLAGS_raid6altivec4.o += $(altivec_flags)
- targets += raid6altivec4.c
- $(obj)/raid6altivec4.c: UNROLL := 4
- $(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
- CFLAGS_raid6altivec8.o += $(altivec_flags)
- targets += raid6altivec8.c
- $(obj)/raid6altivec8.c: UNROLL := 8
- $(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
- quiet_cmd_mktable = TABLE $@
- cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )
--
- targets += raid6tables.c
- $(obj)/raid6tables.c: $(obj)/mktables FORCE
- $(call if_changed,mktable)
--- /dev/null
- cmd_unroll = $(PERL) $(srctree)/$(src)/unroll.pl $(UNROLL) \
+ obj-$(CONFIG_RAID6_PQ) += raid6_pq.o
+
+ raid6_pq-y += raid6algos.o raid6recov.o raid6tables.o \
+ raid6int1.o raid6int2.o raid6int4.o \
+ raid6int8.o raid6int16.o raid6int32.o \
+ raid6altivec1.o raid6altivec2.o raid6altivec4.o \
+ raid6altivec8.o \
+ raid6mmx.o raid6sse1.o raid6sse2.o
+ hostprogs-y += mktables
+
+ quiet_cmd_unroll = UNROLL $@
-$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
++ cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
+ < $< > $@ || ( rm -f $@ && exit 1 )
+
+ ifeq ($(CONFIG_ALTIVEC),y)
+ altivec_flags := -maltivec -mabi=altivec
+ endif
+
+ targets += raid6int1.c
+ $(obj)/raid6int1.c: UNROLL := 1
-$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
++$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+ targets += raid6int2.c
+ $(obj)/raid6int2.c: UNROLL := 2
-$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
++$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+ targets += raid6int4.c
+ $(obj)/raid6int4.c: UNROLL := 4
-$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
++$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+ targets += raid6int8.c
+ $(obj)/raid6int8.c: UNROLL := 8
-$(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
++$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+ targets += raid6int16.c
+ $(obj)/raid6int16.c: UNROLL := 16
-$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
++$(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+ targets += raid6int32.c
+ $(obj)/raid6int32.c: UNROLL := 32
-$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
++$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+ CFLAGS_raid6altivec1.o += $(altivec_flags)
+ targets += raid6altivec1.c
+ $(obj)/raid6altivec1.c: UNROLL := 1
-$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
++$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+ CFLAGS_raid6altivec2.o += $(altivec_flags)
+ targets += raid6altivec2.c
+ $(obj)/raid6altivec2.c: UNROLL := 2
-$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
++$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+ CFLAGS_raid6altivec4.o += $(altivec_flags)
+ targets += raid6altivec4.c
+ $(obj)/raid6altivec4.c: UNROLL := 4
-$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
++$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+ CFLAGS_raid6altivec8.o += $(altivec_flags)
+ targets += raid6altivec8.c
+ $(obj)/raid6altivec8.c: UNROLL := 8
++$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+ quiet_cmd_mktable = TABLE $@
+ cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )
+
+ targets += raid6tables.c
+ $(obj)/raid6tables.c: $(obj)/mktables FORCE
+ $(call if_changed,mktable)
--- /dev/null
-/* Various routine sets */
-extern const struct raid6_calls raid6_intx1;
-extern const struct raid6_calls raid6_intx2;
-extern const struct raid6_calls raid6_intx4;
-extern const struct raid6_calls raid6_intx8;
-extern const struct raid6_calls raid6_intx16;
-extern const struct raid6_calls raid6_intx32;
-extern const struct raid6_calls raid6_mmxx1;
-extern const struct raid6_calls raid6_mmxx2;
-extern const struct raid6_calls raid6_sse1x1;
-extern const struct raid6_calls raid6_sse1x2;
-extern const struct raid6_calls raid6_sse2x1;
-extern const struct raid6_calls raid6_sse2x2;
-extern const struct raid6_calls raid6_sse2x4;
-extern const struct raid6_calls raid6_altivec1;
-extern const struct raid6_calls raid6_altivec2;
-extern const struct raid6_calls raid6_altivec4;
-extern const struct raid6_calls raid6_altivec8;
-
+ /* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2002 H. Peter Anvin - All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
+ * Boston MA 02111-1307, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+ /*
+ * raid6algos.c
+ *
+ * Algorithm list and algorithm selection for RAID-6
+ */
+
+ #include <linux/raid/pq.h>
++#include <linux/gfp.h>
+ #ifndef __KERNEL__
+ #include <sys/mman.h>
+ #include <stdio.h>
+ #else
+ #if !RAID6_USE_EMPTY_ZERO_PAGE
+ /* In .bss so it's zeroed */
+ const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
+ EXPORT_SYMBOL(raid6_empty_zero_page);
+ #endif
+ #endif
+
+ struct raid6_calls raid6_call;
+ EXPORT_SYMBOL_GPL(raid6_call);
+
+ const struct raid6_calls * const raid6_algos[] = {
+ &raid6_intx1,
+ &raid6_intx2,
+ &raid6_intx4,
+ &raid6_intx8,
+ #if defined(__ia64__)
+ &raid6_intx16,
+ &raid6_intx32,
+ #endif
+ #if defined(__i386__) && !defined(__arch_um__)
+ &raid6_mmxx1,
+ &raid6_mmxx2,
+ &raid6_sse1x1,
+ &raid6_sse1x2,
+ &raid6_sse2x1,
+ &raid6_sse2x2,
+ #endif
+ #if defined(__x86_64__) && !defined(__arch_um__)
+ &raid6_sse2x1,
+ &raid6_sse2x2,
+ &raid6_sse2x4,
+ #endif
+ #ifdef CONFIG_ALTIVEC
+ &raid6_altivec1,
+ &raid6_altivec2,
+ &raid6_altivec4,
+ &raid6_altivec8,
+ #endif
+ NULL
+ };
+
+ #ifdef __KERNEL__
+ #define RAID6_TIME_JIFFIES_LG2 4
+ #else
+ /* Need more time to be stable in userspace */
+ #define RAID6_TIME_JIFFIES_LG2 9
+ #define time_before(x, y) ((x) < (y))
+ #endif
+
+ /* Try to pick the best algorithm */
+ /* This code uses the gfmul table as convenient data set to abuse */
+
+ int __init raid6_select_algo(void)
+ {
+ const struct raid6_calls * const * algo;
+ const struct raid6_calls * best;
+ char *syndromes;
+ void *dptrs[(65536/PAGE_SIZE)+2];
+ int i, disks;
+ unsigned long perf, bestperf;
+ int bestprefer;
+ unsigned long j0, j1;
+
+ disks = (65536/PAGE_SIZE)+2;
+ for ( i = 0 ; i < disks-2 ; i++ ) {
+ dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i;
+ }
+
+ /* Normal code - use a 2-page allocation to avoid D$ conflict */
+ syndromes = (void *) __get_free_pages(GFP_KERNEL, 1);
+
+ if ( !syndromes ) {
+ printk("raid6: Yikes! No memory available.\n");
+ return -ENOMEM;
+ }
+
+ dptrs[disks-2] = syndromes;
+ dptrs[disks-1] = syndromes + PAGE_SIZE;
+
+ bestperf = 0; bestprefer = 0; best = NULL;
+
+ for ( algo = raid6_algos ; *algo ; algo++ ) {
+ if ( !(*algo)->valid || (*algo)->valid() ) {
+ perf = 0;
+
+ preempt_disable();
+ j0 = jiffies;
+ while ( (j1 = jiffies) == j0 )
+ cpu_relax();
+ while (time_before(jiffies,
+ j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
+ (*algo)->gen_syndrome(disks, PAGE_SIZE, dptrs);
+ perf++;
+ }
+ preempt_enable();
+
+ if ( (*algo)->prefer > bestprefer ||
+ ((*algo)->prefer == bestprefer &&
+ perf > bestperf) ) {
+ best = *algo;
+ bestprefer = best->prefer;
+ bestperf = perf;
+ }
+ printk("raid6: %-8s %5ld MB/s\n", (*algo)->name,
+ (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
+ }
+ }
+
+ if (best) {
+ printk("raid6: using algorithm %s (%ld MB/s)\n",
+ best->name,
+ (bestperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
+ raid6_call = *best;
+ } else
+ printk("raid6: Yikes! No algorithm found!\n");
+
+ free_pages((unsigned long)syndromes, 1);
+
+ return best ? 0 : -EINVAL;
+ }
+
+ static void raid6_exit(void)
+ {
+ do { } while (0);
+ }
+
+ subsys_initcall(raid6_select_algo);
+ module_exit(raid6_exit);
+ MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");
--- /dev/null
- * This file is postprocessed using unroll.pl
+ /* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
+ * Boston MA 02111-1307, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+ /*
+ * raid6altivec$#.c
+ *
+ * $#-way unrolled portable integer math RAID-6 instruction set
+ *
++ * This file is postprocessed using unroll.awk
+ *
+ * <benh> hpa: in process,
+ * you can just "steal" the vec unit with enable_kernel_altivec() (but
+ * bracked this with preempt_disable/enable or in a lock)
+ */
+
+ #include <linux/raid/pq.h>
+
+ #ifdef CONFIG_ALTIVEC
+
+ #include <altivec.h>
+ #ifdef __KERNEL__
+ # include <asm/system.h>
+ # include <asm/cputable.h>
+ #endif
+
+ /*
+ * This is the C data type to use. We use a vector of
+ * signed char so vec_cmpgt() will generate the right
+ * instruction.
+ */
+
+ typedef vector signed char unative_t;
+
+ #define NBYTES(x) ((vector signed char) {x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
+ #define NSIZE sizeof(unative_t)
+
+ /*
+ * The SHLBYTE() operation shifts each byte left by 1, *not*
+ * rolling over into the next byte
+ */
+ static inline __attribute_const__ unative_t SHLBYTE(unative_t v)
+ {
+ return vec_add(v,v);
+ }
+
+ /*
+ * The MASK() operation returns 0xFF in any byte for which the high
+ * bit is 1, 0x00 for any byte for which the high bit is 0.
+ */
+ static inline __attribute_const__ unative_t MASK(unative_t v)
+ {
+ unative_t zv = NBYTES(0);
+
+ /* vec_cmpgt returns a vector bool char; thus the need for the cast */
+ return (unative_t)vec_cmpgt(zv, v);
+ }
+
+
+ /* This is noinline to make damned sure that gcc doesn't move any of the
+ Altivec code around the enable/disable code */
+ static void noinline
+ raid6_altivec$#_gen_syndrome_real(int disks, size_t bytes, void **ptrs)
+ {
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
+ unative_t x1d = NBYTES(0x1d);
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+ wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
+ for ( z = z0-1 ; z >= 0 ; z-- ) {
+ wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ wp$$ = vec_xor(wp$$, wd$$);
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+ w2$$ = vec_and(w2$$, x1d);
+ w1$$ = vec_xor(w1$$, w2$$);
+ wq$$ = vec_xor(w1$$, wd$$);
+ }
+ *(unative_t *)&p[d+NSIZE*$$] = wp$$;
+ *(unative_t *)&q[d+NSIZE*$$] = wq$$;
+ }
+ }
+
+ static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
+ {
+ preempt_disable();
+ enable_kernel_altivec();
+
+ raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs);
+
+ preempt_enable();
+ }
+
+ int raid6_have_altivec(void);
+ #if $# == 1
+ int raid6_have_altivec(void)
+ {
+ /* This assumes either all CPUs have Altivec or none does */
+ # ifdef __KERNEL__
+ return cpu_has_feature(CPU_FTR_ALTIVEC);
+ # else
+ return 1;
+ # endif
+ }
+ #endif
+
+ const struct raid6_calls raid6_altivec$# = {
+ raid6_altivec$#_gen_syndrome,
+ raid6_have_altivec,
+ "altivecx$#",
+ 0
+ };
+
+ #endif /* CONFIG_ALTIVEC */
--- /dev/null
- * This file is postprocessed using unroll.pl
+ /* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
+ * Boston MA 02111-1307, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+ /*
+ * raid6int$#.c
+ *
+ * $#-way unrolled portable integer math RAID-6 instruction set
+ *
++ * This file is postprocessed using unroll.awk
+ */
+
+ #include <linux/raid/pq.h>
+
+ /*
+ * This is the C data type to use
+ */
+
+ /* Change this from BITS_PER_LONG if there is something better... */
+ #if BITS_PER_LONG == 64
+ # define NBYTES(x) ((x) * 0x0101010101010101UL)
+ # define NSIZE 8
+ # define NSHIFT 3
+ # define NSTRING "64"
+ typedef u64 unative_t;
+ #else
+ # define NBYTES(x) ((x) * 0x01010101U)
+ # define NSIZE 4
+ # define NSHIFT 2
+ # define NSTRING "32"
+ typedef u32 unative_t;
+ #endif
+
+
+
+ /*
+ * IA-64 wants insane amounts of unrolling. On other architectures that
+ * is just a waste of space.
+ */
+ #if ($# <= 8) || defined(__ia64__)
+
+
+ /*
+ * These sub-operations are separate inlines since they can sometimes be
+ * specially optimized using architecture-specific hacks.
+ */
+
+ /*
+ * The SHLBYTE() operation shifts each byte left by 1, *not*
+ * rolling over into the next byte
+ */
+ static inline __attribute_const__ unative_t SHLBYTE(unative_t v)
+ {
+ unative_t vv;
+
+ vv = (v << 1) & NBYTES(0xfe);
+ return vv;
+ }
+
+ /*
+ * The MASK() operation returns 0xFF in any byte for which the high
+ * bit is 1, 0x00 for any byte for which the high bit is 0.
+ */
+ static inline __attribute_const__ unative_t MASK(unative_t v)
+ {
+ unative_t vv;
+
+ vv = v & NBYTES(0x80);
+ vv = (vv << 1) - (vv >> 7); /* Overflow on the top bit is OK */
+ return vv;
+ }
+
+
+ static void raid6_int$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
+ {
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+ wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
+ for ( z = z0-1 ; z >= 0 ; z-- ) {
+ wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ wp$$ ^= wd$$;
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+ w2$$ &= NBYTES(0x1d);
+ w1$$ ^= w2$$;
+ wq$$ = w1$$ ^ wd$$;
+ }
+ *(unative_t *)&p[d+NSIZE*$$] = wp$$;
+ *(unative_t *)&q[d+NSIZE*$$] = wq$$;
+ }
+ }
+
+ const struct raid6_calls raid6_intx$# = {
+ raid6_int$#_gen_syndrome,
+ NULL, /* always valid */
+ "int" NSTRING "x$#",
+ 0
+ };
+
+ #endif
--- /dev/null
-PERL = perl
+ #
+ # This is a simple Makefile to test some of the RAID-6 code
+ # from userspace.
+ #
+
+ CC = gcc
+ OPTFLAGS = -O2 # Adjust as desired
+ CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS)
+ LD = ld
-raid6altivec1.c: raid6altivec.uc ../unroll.pl
- $(PERL) ../unroll.pl 1 < raid6altivec.uc > $@
++AWK = awk
+ AR = ar
+ RANLIB = ranlib
+
+ .c.o:
+ $(CC) $(CFLAGS) -c -o $@ $<
+
+ %.c: ../%.c
+ cp -f $< $@
+
+ %.uc: ../%.uc
+ cp -f $< $@
+
+ all: raid6.a raid6test
+
+ raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
+ raid6int32.o \
+ raid6mmx.o raid6sse1.o raid6sse2.o \
+ raid6altivec1.o raid6altivec2.o raid6altivec4.o raid6altivec8.o \
+ raid6recov.o raid6algos.o \
+ raid6tables.o
+ rm -f $@
+ $(AR) cq $@ $^
+ $(RANLIB) $@
+
+ raid6test: test.c raid6.a
+ $(CC) $(CFLAGS) -o raid6test $^
+
-raid6altivec2.c: raid6altivec.uc ../unroll.pl
- $(PERL) ../unroll.pl 2 < raid6altivec.uc > $@
++raid6altivec1.c: raid6altivec.uc ../unroll.awk
++ $(AWK) -f ../unroll.awk -vN=1 < raid6altivec.uc > $@
+
-raid6altivec4.c: raid6altivec.uc ../unroll.pl
- $(PERL) ../unroll.pl 4 < raid6altivec.uc > $@
++raid6altivec2.c: raid6altivec.uc ../unroll.awk
++ $(AWK) -f ../unroll.awk -vN=2 < raid6altivec.uc > $@
+
-raid6altivec8.c: raid6altivec.uc ../unroll.pl
- $(PERL) ../unroll.pl 8 < raid6altivec.uc > $@
++raid6altivec4.c: raid6altivec.uc ../unroll.awk
++ $(AWK) -f ../unroll.awk -vN=4 < raid6altivec.uc > $@
+
-raid6int1.c: raid6int.uc ../unroll.pl
- $(PERL) ../unroll.pl 1 < raid6int.uc > $@
++raid6altivec8.c: raid6altivec.uc ../unroll.awk
++ $(AWK) -f ../unroll.awk -vN=8 < raid6altivec.uc > $@
+
-raid6int2.c: raid6int.uc ../unroll.pl
- $(PERL) ../unroll.pl 2 < raid6int.uc > $@
++raid6int1.c: raid6int.uc ../unroll.awk
++ $(AWK) -f ../unroll.awk -vN=1 < raid6int.uc > $@
+
-raid6int4.c: raid6int.uc ../unroll.pl
- $(PERL) ../unroll.pl 4 < raid6int.uc > $@
++raid6int2.c: raid6int.uc ../unroll.awk
++ $(AWK) -f ../unroll.awk -vN=2 < raid6int.uc > $@
+
-raid6int8.c: raid6int.uc ../unroll.pl
- $(PERL) ../unroll.pl 8 < raid6int.uc > $@
++raid6int4.c: raid6int.uc ../unroll.awk
++ $(AWK) -f ../unroll.awk -vN=4 < raid6int.uc > $@
+
-raid6int16.c: raid6int.uc ../unroll.pl
- $(PERL) ../unroll.pl 16 < raid6int.uc > $@
++raid6int8.c: raid6int.uc ../unroll.awk
++ $(AWK) -f ../unroll.awk -vN=8 < raid6int.uc > $@
+
-raid6int32.c: raid6int.uc ../unroll.pl
- $(PERL) ../unroll.pl 32 < raid6int.uc > $@
++raid6int16.c: raid6int.uc ../unroll.awk
++ $(AWK) -f ../unroll.awk -vN=16 < raid6int.uc > $@
+
++raid6int32.c: raid6int.uc ../unroll.awk
++ $(AWK) -f ../unroll.awk -vN=32 < raid6int.uc > $@
+
+ raid6tables.c: mktables
+ ./mktables > raid6tables.c
+
+ clean:
+ rm -f *.o *.a mktables mktables.c raid6int.uc raid6*.c raid6test
+
+ spotless: clean
+ rm -f *~
--- /dev/null
--- /dev/null
++
++# This filter requires one command line option of form -vN=n
++# where n must be a decimal number.
++#
++# Repeat each input line containing $$ n times, replacing $$ with 0...n-1.
++# Replace each $# with n, and each $* with a single $.
++
++BEGIN {
++ n = N + 0
++}
++{
++ if (/\$\$/) { rep = n } else { rep = 1 }
++ for (i = 0; i < rep; ++i) {
++ tmp = $0
++ gsub(/\$\$/, i, tmp)
++ gsub(/\$#/, n, tmp)
++ gsub(/\$\*/, "$", tmp)
++ print tmp
++ }
++}