prepare for the transition to linux 2.6.22 - make it possible to override the kernel version in the target makefile

git-svn-id: svn://svn.openwrt.org/openwrt/trunk@7644 3c298f89-4303-0410-b956-a3cf2f4a3e73

--- a/include/kernel-build.mk
+++ b/include/kernel-build.mk
@@ -11,9 +11,11 @@
 include $(INCLUDE_DIR)/kernel.mk
 include $(INCLUDE_DIR)/prereq.mk
 
-LINUX_CONFIG ?= ./config/default
+GENERIC_LINUX_CONFIG:=$(GENERIC_PLATFORM_DIR)/config-$(shell [ -f "$(GENERIC_PLATFORM_DIR)/config-$(KERNEL_PATCHVER)" ] && echo "$(KERNEL_PATCHVER)" || echo template )
+LINUX_CONFIG_DIR ?= ./config$(shell [ -d "./config-$(KERNEL_PATCHVER)" ] && printf -- "-$(KERNEL_PATCHVER)" || true )
+LINUX_CONFIG ?= $(LINUX_CONFIG_DIR)/default
 
--include $(TOPDIR)/target/linux/generic-$(KERNEL)/config-template
+-include $(GENERIC_LINUX_CONFIG)
 -include $(LINUX_CONFIG)
 
 ifneq ($(CONFIG_ATM),)
@@ -166,9 +168,9 @@
 compile: $(LINUX_DIR)/.modules
 menuconfig: $(LINUX_DIR)/.prepared FORCE
 	$(call Kernel/Configure)
-	$(SCRIPT_DIR)/config.pl '+' $(GENERIC_PLATFORM_DIR)/config-template $(LINUX_CONFIG) > $(LINUX_DIR)/.config
+	$(SCRIPT_DIR)/config.pl '+' $(GENERIC_LINUX_CONFIG) $(LINUX_CONFIG) > $(LINUX_DIR)/.config
 	$(MAKE) -C $(LINUX_DIR) $(KERNEL_MAKEOPTS) menuconfig
-	$(SCRIPT_DIR)/config.pl '>' $(GENERIC_PLATFORM_DIR)/config-template $(LINUX_DIR)/.config > $(LINUX_CONFIG)
+	$(SCRIPT_DIR)/config.pl '>' $(GENERIC_LINUX_CONFIG) $(LINUX_DIR)/.config > $(LINUX_CONFIG)
 
 install: $(LINUX_DIR)/.image
 

--- a/include/kernel-defaults.mk
+++ b/include/kernel-defaults.mk
@@ -47,9 +47,9 @@
 endef
 define Kernel/Configure/Default
 	@if [ -f "./config/profile-$(PROFILE)" ]; then \
-		$(SCRIPT_DIR)/config.pl '+' $(GENERIC_PLATFORM_DIR)/config-template '+' $(LINUX_CONFIG) ./config/profile-$(PROFILE) > $(LINUX_DIR)/.config; \
+		$(SCRIPT_DIR)/config.pl '+' $(GENERIC_LINUX_CONFIG) '+' $(LINUX_CONFIG) ./config/profile-$(PROFILE) > $(LINUX_DIR)/.config; \
 	else \
-		$(SCRIPT_DIR)/config.pl '+' $(GENERIC_PLATFORM_DIR)/config-template $(LINUX_CONFIG) > $(LINUX_DIR)/.config; \
+		$(SCRIPT_DIR)/config.pl '+' $(GENERIC_LINUX_CONFIG) $(LINUX_CONFIG) > $(LINUX_DIR)/.config; \
 	fi
 	$(call Kernel/Configure/$(KERNEL))
 	rm -rf $(KERNEL_BUILD_DIR)/modules

--- a/include/kernel.mk
+++ b/include/kernel.mk
@@ -26,8 +26,11 @@
     KERNEL_CROSS:=$(TARGET_CROSS)
   endif
 
+  KERNEL_PATCHVER:=$(shell echo $(LINUX_VERSION) | cut -d. -f1,2,3 | cut -d- -f1)
   PLATFORM_DIR := $(TOPDIR)/target/linux/$(BOARD)-$(KERNEL)
+  PATCH_DIR := ./patches$(shell [ -d "./patches-$(KERNEL_PATCHVER)" ] && printf -- "-$(KERNEL_PATCHVER)" || true )
   GENERIC_PLATFORM_DIR := $(TOPDIR)/target/linux/generic-$(KERNEL)
+  GENERIC_PATCH_DIR := $(GENERIC_PLATFORM_DIR)/patches$(shell [ -d "$(GENERIC_PLATFORM_DIR)/patches-$(KERNEL_PATCHVER)" ] && printf -- "-$(KERNEL_PATCHVER)" || true )
   KERNEL_BUILD_DIR:=$(BUILD_DIR)/linux-$(KERNEL)-$(BOARD)
   LINUX_DIR := $(KERNEL_BUILD_DIR)/linux-$(LINUX_VERSION)
 
@@ -37,10 +40,11 @@
   LINUX_KERNEL:=$(KERNEL_BUILD_DIR)/vmlinux
 
   LINUX_SOURCE:=linux-$(LINUX_VERSION).tar.bz2
-  LINUX_SITE:=http://www.us.kernel.org/pub/linux/kernel/v$(KERNEL) \
-           http://www.us.kernel.org/pub/linux/kernel/v$(KERNEL) \
-           http://www.kernel.org/pub/linux/kernel/v$(KERNEL) \
-           http://www.de.kernel.org/pub/linux/kernel/v$(KERNEL)
+  TESTING:=$(if $(findstring -rc,$(LINUX_VERSION)),/testing,)
+  LINUX_SITE:=http://www.us.kernel.org/pub/linux/kernel/v$(KERNEL)$(TESTING) \
+           http://www.us.kernel.org/pub/linux/kernel/v$(KERNEL)$(TESTING) \
+           http://www.kernel.org/pub/linux/kernel/v$(KERNEL)$(TESTING) \
+           http://www.de.kernel.org/pub/linux/kernel/v$(KERNEL)$(TESTING)
 
   PKG_BUILD_DIR ?= $(KERNEL_BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
 

--- a/include/quilt.mk
+++ b/include/quilt.mk
@@ -46,11 +46,11 @@
 define Kernel/Patch/Default
 	if [ -d $(GENERIC_PLATFORM_DIR)/files ]; then $(CP) $(GENERIC_PLATFORM_DIR)/files/* $(LINUX_DIR)/; fi
 	if [ -d ./files ]; then $(CP) ./files/* $(LINUX_DIR)/; fi
-	$(if $(strip $(QUILT)),$(call Quilt/Patch,$(GENERIC_PLATFORM_DIR)/patches,generic/), \
-		if [ -d $(GENERIC_PLATFORM_DIR)/patches ]; then $(PATCH) $(LINUX_DIR) $(GENERIC_PLATFORM_DIR)/patches; fi \
+	$(if $(strip $(QUILT)),$(call Quilt/Patch,$(GENERIC_PATCH_DIR),generic/), \
+		if [ -d $(GENERIC_PATCH_DIR) ]; then $(PATCH) $(LINUX_DIR) $(GENERIC_PATCH_DIR); fi \
 	)
-	$(if $(strip $(QUILT)),$(call Quilt/Patch,./patches,platform/), \
-		if [ -d ./patches ]; then $(PATCH) $(LINUX_DIR) ./patches; fi \
+	$(if $(strip $(QUILT)),$(call Quilt/Patch,$(PATCH_DIR),platform/), \
+		if [ -d $(PATCH_DIR) ]; then $(PATCH) $(LINUX_DIR) $(PATCH_DIR); fi \
 	)
 	$(if $(strip $(QUILT)),touch $(PKG_BUILD_DIR)/.quilt_used)
 endef
@@ -79,8 +79,8 @@
 		echo "All kernel patches must start with either generic/ or platform/"; \
 		false; \
 	}
-	$(call Quilt/RefreshDir,$(GENERIC_PLATFORM_DIR)/patches,generic/)
-	$(call Quilt/RefreshDir,./patches,platform/)
+	$(call Quilt/RefreshDir,$(GENERIC_PATCH_DIR),generic/)
+	$(call Quilt/RefreshDir,$(PATCH_DIR),platform/)
 endef
 
 quilt-check: $(STAMP_PREPARED) FORCE

--- a/target/linux/adm5120-2.6/config/default
+++ b/target/linux/adm5120-2.6/config/default
@@ -3,6 +3,7 @@
 # CONFIG_64BIT_PHYS_ADDR is not set
 CONFIG_ADM5120_GPIO=y
 CONFIG_ADM5120_NR_UARTS=2
+# CONFIG_ATM_DRIVERS is not set
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
 CONFIG_BASE_SMALL=0
@@ -62,7 +63,7 @@
 # CONFIG_GEN_RTC is not set
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
-CONFIG_HID=y
+CONFIG_HID=m
 CONFIG_HWMON=y
 # CONFIG_HWMON_DEBUG_CHIP is not set
 CONFIG_HW_HAS_PCI=y
@@ -96,6 +97,7 @@
 # CONFIG_JOLIET is not set
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_MACH_ALCHEMY is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
@@ -175,7 +177,9 @@
 # CONFIG_MTD_NAND_DISKONCHIP is not set
 # CONFIG_MTD_NAND_ECC_SMC is not set
 CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
 # CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
 CONFIG_MTD_NAND_RB100=y
 # CONFIG_MTD_NAND_VERIFY_WRITE is not set
 # CONFIG_MTD_OBSOLETE_CHIPS is not set
@@ -199,6 +203,7 @@
 # CONFIG_NET_PKTGEN is not set
 CONFIG_NET_SCH_FIFO=y
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NETDEV_1000 is not set
 # CONFIG_PAGE_SIZE_16KB is not set
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_64KB is not set

--- a/target/linux/adm5120eb-2.6/config/default
+++ b/target/linux/adm5120eb-2.6/config/default
@@ -3,6 +3,7 @@
 # CONFIG_64BIT_PHYS_ADDR is not set
 CONFIG_ADM5120_GPIO=y
 CONFIG_ADM5120_NR_UARTS=2
+# CONFIG_ATM_DRIVERS is not set
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
 CONFIG_BASE_SMALL=0
@@ -60,9 +61,9 @@
 CONFIG_GENERIC_GPIO=y
 # CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
 # CONFIG_GEN_RTC is not set
+CONFIG_HID=m
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
-CONFIG_HID=y
 CONFIG_HWMON=y
 # CONFIG_HWMON_DEBUG_CHIP is not set
 CONFIG_HW_HAS_PCI=y
@@ -96,6 +97,7 @@
 # CONFIG_JOLIET is not set
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_MACH_ALCHEMY is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
@@ -194,6 +196,7 @@
 # CONFIG_NET_PKTGEN is not set
 CONFIG_NET_SCH_FIFO=y
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NETDEV_1000 is not set
 # CONFIG_PAGE_SIZE_16KB is not set
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_64KB is not set

--- a/target/linux/amcc-2.6/config/default
+++ b/target/linux/amcc-2.6/config/default
@@ -4,6 +4,7 @@
 # CONFIG_6xx is not set
 # CONFIG_8139TOO is not set
 # CONFIG_8xx is not set
+# CONFIG_ATM_DRIVERS is not set
 # CONFIG_BAMBOO is not set
 # CONFIG_E200 is not set
 # CONFIG_E500 is not set
@@ -65,6 +66,7 @@
 CONFIG_KERNEL_START=0xc0000000
 # CONFIG_KEXEC is not set
 CONFIG_LOWMEM_SIZE=0x30000000
+# CONFIG_MACINTOSH_DRIVERS is not set
 CONFIG_MATH_EMULATION=y
 CONFIG_MINI_FO=y
 CONFIG_MTD=y
@@ -123,6 +125,7 @@
 # CONFIG_NATSEMI is not set
 # CONFIG_NE2K_PCI is not set
 # CONFIG_NET_VENDOR_3COM is not set
+CONFIG_NETDEV_1000=y
 CONFIG_NOT_COHERENT_CACHE=y
 # CONFIG_NVRAM is not set
 # CONFIG_PCIPCWATCHDOG is not set

--- a/target/linux/ar7-2.6/config/default
+++ b/target/linux/ar7-2.6/config/default
@@ -6,6 +6,7 @@
 CONFIG_AR7_WDT=y
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ATM_DRIVERS=y
 # CONFIG_ATMEL is not set
 CONFIG_BASE_SMALL=0
 # CONFIG_BCM43XX is not set
@@ -73,6 +74,7 @@
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_LEDS_AR7=y
+# CONFIG_MACH_ALCHEMY is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
@@ -159,6 +161,7 @@
 # CONFIG_NET_PCI is not set
 CONFIG_NET_SCH_FIFO=y
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NETDEV_1000 is not set
 CONFIG_NODES_SHIFT=6
 # CONFIG_PAGE_SIZE_16KB is not set
 CONFIG_PAGE_SIZE_4KB=y

--- a/target/linux/aruba-2.6/config/default
+++ b/target/linux/aruba-2.6/config/default
@@ -3,6 +3,7 @@
 # CONFIG_64BIT_PHYS_ADDR is not set
 # CONFIG_8139TOO is not set
 CONFIG_AR2313=y
+# CONFIG_ATM_DRIVERS is not set
 # CONFIG_B44 is not set
 CONFIG_BASE_SMALL=0
 CONFIG_CPU_BIG_ENDIAN=y
@@ -63,6 +64,7 @@
 # CONFIG_IPW2200 is not set
 CONFIG_JFFS2_FS_DEBUG=0
 # CONFIG_LAN_SAA9730 is not set
+# CONFIG_MACH_ALCHEMY is not set
 CONFIG_MACH_ARUBA=y
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
@@ -155,6 +157,7 @@
 CONFIG_NATSEMI=y
 # CONFIG_NE2K_PCI is not set
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NETDEV_1000 is not set
 # CONFIG_PAGE_SIZE_16KB is not set
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_64KB is not set
@@ -191,6 +194,7 @@
 CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
 CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
 CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
+# CONFIG_TC35815 is not set
 # CONFIG_TOSHIBA_JMR3927 is not set
 # CONFIG_TOSHIBA_RBTX4927 is not set
 # CONFIG_TOSHIBA_RBTX4938 is not set

--- a/target/linux/atheros-2.6/config/default
+++ b/target/linux/atheros-2.6/config/default
@@ -60,6 +60,7 @@
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_IRQ_CPU=y
 CONFIG_JFFS2_FS_DEBUG=0
+# CONFIG_MACH_ALCHEMY is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
@@ -149,6 +150,7 @@
 # CONFIG_MTD_SLRAM is not set
 CONFIG_MTD_SPIFLASH=y
 CONFIG_NET_SCH_FIFO=y
+# CONFIG_NETDEV_1000 is not set
 # CONFIG_PAGE_SIZE_16KB is not set
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_64KB is not set

--- a/target/linux/au1000-2.6/config/default
+++ b/target/linux/au1000-2.6/config/default
@@ -77,6 +77,7 @@
 CONFIG_JFFS2_FS_DEBUG=0
 CONFIG_LEDS_MTX1=y
 # CONFIG_LEDS_TRIGGERS is not set
+CONFIG_MACH_ALCHEMY=y
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
@@ -164,6 +165,7 @@
 # CONFIG_NE2K_PCI is not set
 CONFIG_NET_SCH_FIFO=y
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NETDEV_1000 is not set
 CONFIG_NF_CONNTRACK=y
 CONFIG_NF_CONNTRACK_MARK=y
 CONFIG_NF_CT_ACCT=y
@@ -228,6 +230,7 @@
 CONFIG_SYS_SUPPORTS_KGDB=y
 CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
 # CONFIG_TMD_HERMES is not set
+# CONFIG_TC35815 is not set
 # CONFIG_TOSHIBA_JMR3927 is not set
 # CONFIG_TOSHIBA_RBTX4927 is not set
 # CONFIG_TOSHIBA_RBTX4938 is not set
@@ -244,6 +247,7 @@
 CONFIG_USB_OHCI_HCD=m
 CONFIG_USB_UHCI_HCD=m
 # CONFIG_VIA_RHINE is not set
+CONFIG_WDT_MTX1=y
 CONFIG_ZONE_DMA=y
 CONFIG_ZONE_DMA_FLAG=1
 

--- a/target/linux/avr32-2.6/config/default
+++ b/target/linux/avr32-2.6/config/default
@@ -2,6 +2,7 @@
 # CONFIG_AP7000_32_BIT_SMC is not set
 # CONFIG_AP7000_8_BIT_SMC is not set
 # CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+# CONFIG_ATM_DRIVERS is not set
 CONFIG_AVR32=y
 CONFIG_BOARD_ATNGW100=y
 # CONFIG_BOARD_ATSTK1000 is not set
@@ -52,6 +53,7 @@
 # CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
 # CONFIG_MTD_MTDRAM is not set
 # CONFIG_MTD_NAND is not set
+# CONFIG_NETDEV_1000 is not set
 # CONFIG_MTD_OBSOLETE_CHIPS is not set
 # CONFIG_MTD_ONENAND is not set
 CONFIG_MTD_PARTITIONS=y
@@ -80,6 +82,7 @@
 # CONFIG_SPI_BITBANG is not set
 # CONFIG_SPI_DEBUG is not set
 CONFIG_SPI_MASTER=y
+# CONFIG_SPI_SPIDEV is not set
 CONFIG_SPI=y
 CONFIG_SUBARCH_AVR32B=y
 # CONFIG_UNUSED_SYMBOLS is not set

--- a/target/linux/brcm47xx-2.6/config/default
+++ b/target/linux/brcm47xx-2.6/config/default
@@ -5,6 +5,7 @@
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
 # CONFIG_ARPD is not set
+# CONFIG_ATM_DRIVERS is not set
 # CONFIG_ATMEL is not set
 CONFIG_B44=y
 CONFIG_BASE_SMALL=0
@@ -117,6 +118,7 @@
 CONFIG_JFFS2_FS_DEBUG=0
 # CONFIG_LIBCRC32C is not set
 # CONFIG_LLC2 is not set
+# CONFIG_MACH_ALCHEMY is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
@@ -198,6 +200,7 @@
 # CONFIG_MTD_SLRAM is not set
 # CONFIG_NATSEMI is not set
 # CONFIG_NE2K_PCI is not set
+CONFIG_NETDEV_1000=y
 # CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
 # CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
 # CONFIG_NETFILTER_XT_MATCH_DCCP is not set
@@ -260,6 +263,7 @@
 CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
 CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
 CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
+# CONFIG_TC35815 is not set
 # CONFIG_TOSHIBA_JMR3927 is not set
 # CONFIG_TOSHIBA_RBTX4927 is not set
 # CONFIG_TOSHIBA_RBTX4938 is not set

--- a/target/linux/brcm63xx-2.6/config/default
+++ b/target/linux/brcm63xx-2.6/config/default
@@ -4,6 +4,7 @@
 # CONFIG_8139TOO is not set
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ATM_DRIVERS=y
 CONFIG_AUDIT=y
 CONFIG_AUDIT_GENERIC=y
 CONFIG_BASE_SMALL=0
@@ -116,14 +117,7 @@
 # CONFIG_IDE is not set
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
-CONFIG_INPUT=m
-# CONFIG_INPUT_EVDEV is not set
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_INPUT_MOUSE=y
-CONFIG_INPUT_MOUSEDEV=m
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT is not set
 CONFIG_IOSCHED_CFQ=y
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IP6_NF_MATCH_FRAG is not set
@@ -165,10 +159,12 @@
 # CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
 # CONFIG_LLC2 is not set
 CONFIG_LXT_PHY=m
+# CONFIG_MACH_ALCHEMY is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
 # CONFIG_MAC_PARTITION is not set
+# CONFIG_MAC80211_DEBUGFS is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_MARVELL_PHY=m
 CONFIG_MII=m
@@ -206,9 +202,6 @@
 # CONFIG_MOMENCO_OCELOT_3 is not set
 # CONFIG_MOMENCO_OCELOT_C is not set
 # CONFIG_MOMENCO_OCELOT_G is not set
-CONFIG_MOUSE_PS2=m
-# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_VSXXXAA is not set
 CONFIG_MTD=y
 # CONFIG_MTD_ABSENT is not set
 CONFIG_MTD_BCM963XX=y
@@ -277,6 +270,7 @@
 # CONFIG_NET_SCH_CLK_JIFFIES is not set
 CONFIG_NET_SCH_FIFO=y
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NETDEV_1000 is not set
 CONFIG_NLS=y
 CONFIG_NLS_ASCII=m
 # CONFIG_PAGE_SIZE_16KB is not set
@@ -332,6 +326,7 @@
 CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
 CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
 CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
+# CONFIG_TC35815 is not set
 CONFIG_TCP_CONG_BIC=y
 # CONFIG_TCP_CONG_HSTCP is not set
 # CONFIG_TCP_CONG_HYBLA is not set

--- /dev/null
+++ b/target/linux/generic-2.6/config-2.6.22
@@ -1,1 +1,1598 @@
+# CONFIG_6PACK is not set
+# CONFIG_8139CP is not set
+# CONFIG_9P_FS is not set
+# CONFIG_ACENIC is not set
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_AIRO=m
+CONFIG_AIRO_CS=m
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_AMIGA_PARTITION is not set
+CONFIG_ANON_INODES=y
+# CONFIG_APPLICOM is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_ARCNET is not set
+CONFIG_ARPD=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_ATA is not set
+# CONFIG_ATALK is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_ATL1 is not set
+CONFIG_ATM=m
+CONFIG_ATMEL=m
+# CONFIG_ATM_AMBASSADOR is not set
+CONFIG_ATM_BR2684=m
+CONFIG_ATM_BR2684_IPFILTER=y
+CONFIG_ATM_CLIP=m
+CONFIG_ATM_CLIP_NO_ICMP=y
+CONFIG_ATM_DUMMY=m
+# CONFIG_ATM_ENI is not set
+# CONFIG_ATM_FIRESTREAM is not set
+# CONFIG_ATM_FORE200E_MAYBE is not set
+# CONFIG_ATM_HE is not set
+# CONFIG_ATM_HORIZON is not set
+# CONFIG_ATM_IA is not set
+# CONFIG_ATM_IDT77252 is not set
+# CONFIG_ATM_LANAI is not set
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+# CONFIG_ATM_NICSTAR is not set
+CONFIG_ATM_TCP=m
+# CONFIG_ATM_ZATM is not set
+# CONFIG_AUDIT is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AX25=m
+# CONFIG_AX25_DAMA_SLAVE is not set
+# CONFIG_B44 is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+CONFIG_BASE_FULL=y
+# CONFIG_BASLER_EXCITE is not set
+# CONFIG_BAYCOM_SER_FDX is not set
+# CONFIG_BAYCOM_SER_HDX is not set
+CONFIG_BCM43XX=m
+CONFIG_BCM43XX_DEBUG=y
+CONFIG_BCM43XX_DMA=y
+CONFIG_BCM43XX_DMA_AND_PIO_MODE=y
+# CONFIG_BCM43XX_DMA_MODE is not set
+CONFIG_BCM43XX_PIO=y
+# CONFIG_BCM43XX_PIO_MODE is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_BLINK is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_ATIIXP is not set
+# CONFIG_BLK_DEV_CMD640 is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_CS5535 is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_DELKIN is not set
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_IDEPNP is not set
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_BLK_DEV_IT8213 is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_JMICRON is not set
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_NBD=m
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_OFFBOARD is not set
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_BLK_DEV_RZ1000 is not set
+CONFIG_BLK_DEV_SD=m
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SIS5513 is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_TC86C001 is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_UB is not set
+# CONFIG_BLK_DEV_UMEM is not set
+CONFIG_BLOCK=y
+# CONFIG_BNX2 is not set
+CONFIG_BONDING=m
+# CONFIG_BPQETHER is not set
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_NETFILTER=y
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_BT=m
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+# CONFIG_BT_CMTP is not set
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBFUSB=m
+# CONFIG_BT_HCIBLUECARD is not set
+CONFIG_BT_HCIBPA10X=m
+# CONFIG_BT_HCIBT3C is not set
+# CONFIG_BT_HCIBTUART is not set
+# CONFIG_BT_HCIDTL1 is not set
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUSB=m
+CONFIG_BT_HCIUSB_SCO=y
+CONFIG_BT_HCIVHCI=m
+CONFIG_BT_HIDP=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_SCO=m
+CONFIG_BUG=y
+# CONFIG_CAPI_AVM is not set
+# CONFIG_CAPI_EICON is not set
+# CONFIG_CAPI_TRACE is not set
+CONFIG_CARDBUS=y
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_CASSINI is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_CDROM_PKTCDVD is not set
+CONFIG_CFG80211=m
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CICADA_PHY is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+CONFIG_CIFS_POSIX=y
+CONFIG_CIFS_STATS=y
+# CONFIG_CIFS_STATS2 is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+CONFIG_CLS_U32_MARK=y
+CONFIG_CLS_U32_PERF=y
+CONFIG_CMDLINE=""
+# CONFIG_CODA_FS is not set
+CONFIG_CONFIGFS_FS=y
+# CONFIG_CONNECTOR is not set
+# CONFIG_CRAMFS is not set
+CONFIG_CRC16=m
+CONFIG_CRC32=y
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CROSSCOMPILE=y
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLOWFISH=m
+# CONFIG_CRYPTO_CAMELLIA is not set
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_KHAZAD=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_NULL=m
+# CONFIG_CRYPTO_PCBC is not set
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+CONFIG_CRYPTO_WP512=m
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_DAB is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DECNET is not set
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_BIC is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_CUBIC is not set
+CONFIG_DEFAULT_DEADLINE=y
+# CONFIG_DEFAULT_HTCP is not set
+CONFIG_DEFAULT_IOSCHED="deadline"
+# CONFIG_DEFAULT_NOOP is not set
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="vegas"
+CONFIG_DEFAULT_VEGAS=y
+# CONFIG_DEFAULT_WESTWOOD is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+# CONFIG_DEVFS_DEBUG is not set
+CONFIG_DEVFS_FS=y
+CONFIG_DEVFS_MOUNT=y
+# CONFIG_DGRS is not set
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_DL2K is not set
+# CONFIG_DLM is not set
+# CONFIG_DMA_ENGINE is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_DRM is not set
+# CONFIG_DTLK is not set
+# CONFIG_DUMMY is not set
+# CONFIG_DVB is not set
+# CONFIG_DVB_CORE is not set
+CONFIG_E100=m
+# CONFIG_E1000 is not set
+# CONFIG_ECONET is not set
+# CONFIG_EEPRO100 is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_ELF_CORE is not set
+CONFIG_EMBEDDED=y
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_EPIC100 is not set
+CONFIG_EPOLL=y
+# CONFIG_EQUALIZER is not set
+CONFIG_EVENTFD=y
+CONFIG_EXPERIMENTAL=y
+CONFIG_EXPORTFS=m
+CONFIG_EXT2_FS=m
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=m
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_FAT_FS=m
+# CONFIG_FB is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FDDI is not set
+# CONFIG_FEALNX is not set
+CONFIG_FIB_RULES=y
+# CONFIG_FIREWIRE is not set
+CONFIG_FLATMEM=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_FORCEDETH is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_FTL is not set
+# CONFIG_FUSE_FS is not set
+# CONFIG_FUSION is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
+# CONFIG_FUSION_SPI is not set
+CONFIG_FUTEX=y
+CONFIG_FW_LOADER=y
+CONFIG_GACT_PROB=y
+# CONFIG_GAMEPORT is not set
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_TIME=y
+# CONFIG_GFS2_FS is not set
+# CONFIG_HAMACHI is not set
+CONFIG_HAMRADIO=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_HERMES=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_HFS_FS=m
+# CONFIG_HID_DEBUG is not set
+# CONFIG_HID_FF is not set
+CONFIG_HIGH_RES_TIMERS=y
+# CONFIG_HIPPI is not set
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_CS=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PCI=m
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOTPLUG=y
+# CONFIG_HOTPLUG_PCI is not set
+# CONFIG_HP100 is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
+CONFIG_HZ=100
+CONFIG_HZ_100=y
+# CONFIG_HZ_1000 is not set
+# CONFIG_HZ_1024 is not set
+# CONFIG_HZ_128 is not set
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_256 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_48 is not set
+# CONFIG_I2C_ALGOPCA is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_ELEKTOR is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PCA_ISA is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_TINY_USB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2O is not set
+# CONFIG_I82092 is not set
+# CONFIG_IEEE1394 is not set
+CONFIG_IEEE80211=m
+CONFIG_IEEE80211_CRYPT_CCMP=m
+CONFIG_IEEE80211_CRYPT_TKIP=m
+CONFIG_IEEE80211_CRYPT_WEP=m
+# CONFIG_IEEE80211_DEBUG is not set
+CONFIG_IEEE80211_SOFTMAC=m
+# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
+CONFIG_IFB=m
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IMQ=m
+# CONFIG_IMQ_BEHAVIOR_AA is not set
+# CONFIG_IMQ_BEHAVIOR_AB is not set
+CONFIG_IMQ_BEHAVIOR_BA=y
+# CONFIG_IMQ_BEHAVIOR_BB is not set
+CONFIG_IMQ_NUM_DEVS=2
+CONFIG_INET=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET_AH=m
+CONFIG_INET_DCCP_DIAG=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TCP_DIAG=m
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_TUNNEL=m
+# CONFIG_INFINIBAND is not set
+# CONFIG_INFTL is not set
+CONFIG_INIT_ENV_ARG_LIMIT=32
+# CONFIG_INOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_INPUT is not set
+# CONFIG_INPUT_EVBUG is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MISC is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+# CONFIG_IP6_NF_MATCH_MH is not set
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_TARGET_IMQ=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_ROUTE=m
+# CONFIG_IPC_NS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_IPSEC_NAT_TRAVERSAL=y
+CONFIG_IPV6=m
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_IPV6_PRIVACY is not set
+CONFIG_IPV6_ROUTER_PREF=y
+# CONFIG_IPV6_ROUTE_INFO is not set
+CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_TUNNEL is not set
+CONFIG_IPW2100=m
+# CONFIG_IPW2100_DEBUG is not set
+CONFIG_IPW2100_MONITOR=y
+CONFIG_IPW2200=m
+# CONFIG_IPW2200_DEBUG is not set
+CONFIG_IPW2200_MONITOR=y
+# CONFIG_IPW2200_PROMISCUOUS is not set
+# CONFIG_IPW2200_QOS is not set
+# CONFIG_IPW2200_RADIOTAP is not set
+# CONFIG_IPX is not set
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_DCCP=m
+CONFIG_IP_DCCP_ACKVEC=y
+CONFIG_IP_DCCP_CCID2=m
+# CONFIG_IP_DCCP_CCID2_DEBUG is not set
+CONFIG_IP_DCCP_CCID3=m
+# CONFIG_IP_DCCP_CCID3_DEBUG is not set
+CONFIG_IP_DCCP_CCID3_RTO=100
+CONFIG_IP_DCCP_TFRC_LIB=m
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+# CONFIG_IP_MROUTE is not set
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_NF_CONNTRACK=y
+# CONFIG_IP_NF_CONNTRACK_EVENTS is not set
+CONFIG_IP_NF_CONNTRACK_MARK=y
+# CONFIG_IP_NF_CONNTRACK_SUPPORT is not set
+CONFIG_IP_NF_CT_ACCT=y
+CONFIG_IP_NF_CT_PROTO_SCTP=m
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_H323=m
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_HASHLIMIT=m
+CONFIG_IP_NF_MATCH_IPP2P=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_LAYER7=m
+# CONFIG_IP_NF_MATCH_LAYER7_DEBUG is not set
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_SET=m
+CONFIG_IP_NF_MATCH_TIME=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_H323=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_NAT_PPTP=m
+CONFIG_IP_NF_NAT_SIP=m
+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
+CONFIG_IP_NF_NAT_TFTP=m
+# CONFIG_IP_NF_NETBIOS_NS is not set
+CONFIG_IP_NF_PPTP=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SET=m
+CONFIG_IP_NF_SET_HASHSIZE=1024
+CONFIG_IP_NF_SET_IPHASH=m
+CONFIG_IP_NF_SET_IPMAP=m
+CONFIG_IP_NF_SET_IPPORTHASH=m
+CONFIG_IP_NF_SET_IPTREE=m
+CONFIG_IP_NF_SET_MACIPMAP=m
+CONFIG_IP_NF_SET_MAX=256
+CONFIG_IP_NF_SET_NETHASH=m
+CONFIG_IP_NF_SET_PORTMAP=m
+CONFIG_IP_NF_SIP=m
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_IMQ=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_ROUTE=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_TARGET_SET=m
+CONFIG_IP_NF_TARGET_TCPMSS=y
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TFTP=m
+# CONFIG_IP_PNP is not set
+CONFIG_IP_ROUTE_FWMARK=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_MULTIPATH_CACHED=y
+CONFIG_IP_ROUTE_MULTIPATH_DRR=m
+CONFIG_IP_ROUTE_MULTIPATH_RANDOM=m
+CONFIG_IP_ROUTE_MULTIPATH_RR=m
+CONFIG_IP_ROUTE_MULTIPATH_WRANDOM=m
+CONFIG_IP_ROUTE_VERBOSE=y
+# CONFIG_IP_SCTP is not set
+# CONFIG_IP_VS is not set
+# CONFIG_IRDA is not set
+# CONFIG_ISCSI_TCP is not set
+CONFIG_ISDN=m
+CONFIG_ISDN_CAPI=m
+CONFIG_ISDN_CAPI_CAPI20=m
+CONFIG_ISDN_CAPI_CAPIFS=m
+CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
+CONFIG_ISDN_CAPI_MIDDLEWARE=y
+# CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON is not set
+# CONFIG_ISDN_I4L is not set
+CONFIG_ISO9660_FS=m
+# CONFIG_IXGB is not set
+CONFIG_JBD=m
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_XATTR is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_SUMMARY is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFS_DEBUG is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_SECURITY is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_JOLIET=y
+# CONFIG_KALLSYMS is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_KEXEC is not set
+# CONFIG_KEYS is not set
+# CONFIG_KMOD is not set
+# CONFIG_LAPB is not set
+# CONFIG_LASAT is not set
+# CONFIG_LBD is not set
+# CONFIG_LDM_PARTITION is not set
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_LIBCRC32C=m
+# CONFIG_LIBERTAS_USB is not set
+CONFIG_LLC=y
+CONFIG_LLC2=m
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_LOCKD=m
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_LOCKD_V4=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_LSF is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_MAC80211_DEBUG is not set
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211=m
+# CONFIG_MAC_EMUMOUSEBTN is not set
+CONFIG_MAC_PARTITION=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_MARKEINS is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_MD is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_MFD_SM501 is not set
+CONFIG_MII=y
+CONFIG_MINIX_FS=m
+# CONFIG_MINIX_SUBPARTITION is not set
+CONFIG_MINI_FO=y
+CONFIG_MKISS=m
+# CONFIG_MMC is not set
+CONFIG_MMU=y
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+CONFIG_MSDOS_FS=m
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_MTD_NAND is not set
+CONFIG_MTD_SPLIT_ROOTFS=y
+# CONFIG_MTD_UBI is not set
+# CONFIG_MYRI10GE is not set
+# CONFIG_NCP_FS is not set
+CONFIG_NET=y
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETDEBUG is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEV_10000 is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+# CONFIG_NETFILTER_NETLINK is not set
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PORTSCAN=m
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=y
+CONFIG_NETFILTER_XT_TARGET_CHAOS=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_DELUDE=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+# CONFIG_NETPOLL is not set
+# CONFIG_NETROM is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETXEN_NIC is not set
+CONFIG_NET_ACT_GACT=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_POLICE=m
+# CONFIG_NET_ACT_SIMP is not set
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_IND=y
+CONFIG_NET_CLS_POLICE=y
+CONFIG_NET_CLS_ROUTE=y
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_STACK=32
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_ESTIMATOR=y
+CONFIG_NET_ETHERNET=y
+# CONFIG_NET_FC is not set
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_KEY=m
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_NET_PCI=y
+# CONFIG_NET_PCMCIA is not set
+CONFIG_NET_PKTGEN=m
+# CONFIG_NET_POLL_CONTROLLER is not set
+CONFIG_NET_RADIO=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_ATM=m
+CONFIG_NET_SCH_CBQ=m
+# CONFIG_NET_SCH_CLK_CPU is not set
+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
+CONFIG_NET_SCH_CLK_JIFFIES=y
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_ESFQ=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_TEQL=m
+# CONFIG_NET_TULIP is not set
+CONFIG_NET_WIRELESS=y
+CONFIG_NET_WIRELESS_RTNETLINK=y
+CONFIG_NEW_LEDS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_TCP=y
+# CONFIG_NFSD_V2_ACL is not set
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+CONFIG_NFSD_V4=y
+# CONFIG_NFS_ACL_SUPPORT is not set
+CONFIG_NFS_COMMON=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFTL is not set
+CONFIG_NF_CONNTRACK=y
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+CONFIG_NF_CONNTRACK_ENABLED=y
+# CONFIG_NF_CONNTRACK_EVENTS is not set
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IPV4=y
+# CONFIG_NF_CONNTRACK_IPV6 is not set
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_MARK=y
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_PPTP is not set
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+# CONFIG_NF_CONNTRACK_SANE is not set
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_SUPPORT=y
+# CONFIG_NF_CONNTRACK_TFTP is not set
+CONFIG_NF_CT_ACCT=y
+# CONFIG_NF_CT_PROTO_SCTP is not set
+CONFIG_NF_NAT=y
+# CONFIG_NF_NAT_AMANDA is not set
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_H323=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_NEEDED=y
+# CONFIG_NF_NAT_PPTP is not set
+CONFIG_NF_NAT_SIP=m
+# CONFIG_NF_NAT_SNMP_BASIC is not set
+# CONFIG_NF_NAT_TFTP is not set
+CONFIG_NLS=m
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_CODEPAGE_1250=m
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=m
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+CONFIG_NLS_ISO8859_15=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+CONFIG_NLS_KOI8_R=m
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+CONFIG_NO_HZ=y
+CONFIG_NORTEL_HERMES=m
+# CONFIG_NS83820 is not set
+# CONFIG_NTFS_DEBUG is not set
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+# CONFIG_OCFS2_FS is not set
+# CONFIG_OSF_PARTITION is not set
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+# CONFIG_PARPORT is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CS5535 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_ISAPNP is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PCMCIA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+CONFIG_PATA_PLATFORM=m
+# CONFIG_PATA_QDI is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+# CONFIG_PATA_WINBOND_VLB is not set
+# CONFIG_PC300TOO is not set
+# CONFIG_PCCARD is not set
+CONFIG_PCI=y
+CONFIG_PCI_ATMEL=m
+CONFIG_PCI_HERMES=m
+# CONFIG_PCMCIA is not set
+# CONFIG_PCMCIA_AHA152X is not set
+# CONFIG_PCMCIA_DEBUG is not set
+# CONFIG_PCMCIA_FDOMAIN is not set
+# CONFIG_PCMCIA_HERMES is not set
+# CONFIG_PCMCIA_IOCTL is not set
+# CONFIG_PCMCIA_LOAD_CIS is not set
+# CONFIG_PCMCIA_NETWAVE is not set
+# CONFIG_PCMCIA_NINJA_SCSI is not set
+# CONFIG_PCMCIA_QLOGIC is not set
+# CONFIG_PCMCIA_RAYCS is not set
+# CONFIG_PCMCIA_SPECTRUM is not set
+# CONFIG_PCMCIA_SYM53C500 is not set
+# CONFIG_PCMCIA_WAVELAN is not set
+# CONFIG_PCMCIA_WL3501 is not set
+# CONFIG_PCNET32 is not set
+# CONFIG_PD6729 is not set
+# CONFIG_PHANTOM is not set
+# CONFIG_PHONE is not set
+# CONFIG_PHYLIB is not set
+CONFIG_PLIST=y
+CONFIG_PLX_HERMES=m
+# CONFIG_PM is not set
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_PPP=m
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_PREEMPT is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_PRINTK=y
+# CONFIG_PRINTK_TIME is not set
+CONFIG_PRISM54=m
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+# CONFIG_PROFILING is not set
+# CONFIG_QEMU is not set
+# CONFIG_QLA3XXX is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_R3964 is not set
+# CONFIG_R8169 is not set
+# CONFIG_RADIO_ADAPTERS is not set
+# CONFIG_RADIO_AZTECH is not set
+# CONFIG_RADIO_CADET is not set
+# CONFIG_RADIO_GEMTEK is not set
+# CONFIG_RADIO_GEMTEK_PCI is not set
+# CONFIG_RADIO_MAESTRO is not set
+# CONFIG_RADIO_MAXIRADIO is not set
+# CONFIG_RADIO_RTRACK is not set
+# CONFIG_RADIO_RTRACK2 is not set
+# CONFIG_RADIO_SF16FMI is not set
+# CONFIG_RADIO_SF16FMR2 is not set
+# CONFIG_RADIO_TERRATEC is not set
+# CONFIG_RADIO_TRUST is not set
+# CONFIG_RADIO_TYPHOON is not set
+# CONFIG_RADIO_ZOLTRIX is not set
+# CONFIG_RAID_ATTRS is not set
+CONFIG_RAMFS=y
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_REISERFS_CHECK is not set
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_RELAY is not set
+# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_RFKILL is not set
+CONFIG_ROMFS_FS=m
+# CONFIG_ROSE is not set
+CONFIG_RPCSEC_GSS_KRB5=m
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_RTC_CLASS is not set
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_RTC_DRV_MAX6900 is not set
+CONFIG_RT_MUTEXES=y
+# CONFIG_S2IO is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIL24 is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_SC92031 is not set
+CONFIG_SCSI=m
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_ESP_CORE is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_LPFC is not set
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_PAS16 is not set
+CONFIG_SCSI_PROC_FS=y
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+# CONFIG_SCSI_SEAGATE is not set
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_ULTRASTOR is not set
+# CONFIG_SECURITY is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_HDAPS is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_CS=m
+CONFIG_SERIAL_8250_NR_UARTS=2
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_SERIAL_UARTLITE is not set
+# CONFIG_SERIO is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_SHAPER is not set
+CONFIG_SHMEM=y
+CONFIG_SIGNALFD=y
+# CONFIG_SIS190 is not set
+# CONFIG_SIS900 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+CONFIG_SLHC=m
+# CONFIG_SLIP is not set
+# CONFIG_SLOB is not set
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_SND=m
+# CONFIG_SND_AD1816A is not set
+# CONFIG_SND_AD1848 is not set
+# CONFIG_SND_AD1889 is not set
+# CONFIG_SND_ADLIB is not set
+# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_ALS100 is not set
+# CONFIG_SND_ALS300 is not set
+# CONFIG_SND_ALS4000 is not set
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_ATIIXP_MODEM is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AZT2320 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMI8330 is not set
+# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_CS4231 is not set
+# CONFIG_SND_CS4232 is not set
+# CONFIG_SND_CS4236 is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CS5535AUDIO is not set
+# CONFIG_SND_DARLA20 is not set
+# CONFIG_SND_DARLA24 is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_DT019X is not set
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_ECHO3G is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_EMU10K1X is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1688 is not set
+# CONFIG_SND_ES18XX is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_ES968 is not set
+# CONFIG_SND_FM801 is not set
+# CONFIG_SND_GINA20 is not set
+# CONFIG_SND_GINA24 is not set
+# CONFIG_SND_GUSCLASSIC is not set
+# CONFIG_SND_GUSEXTREME is not set
+# CONFIG_SND_GUSMAX is not set
+# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
+CONFIG_SND_HWDEP=m
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+# CONFIG_SND_INDIGO is not set
+# CONFIG_SND_INDIGODJ is not set
+# CONFIG_SND_INDIGOIO is not set
+# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_INTERWAVE is not set
+# CONFIG_SND_INTERWAVE_STB is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LAYLA20 is not set
+# CONFIG_SND_LAYLA24 is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIA is not set
+# CONFIG_SND_MIRO is not set
+# CONFIG_SND_MIXART is not set
+CONFIG_SND_MIXER_OSS=m
+# CONFIG_SND_MONA is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_OPL3SA2 is not set
+# CONFIG_SND_OPTI92X_AD1848 is not set
+# CONFIG_SND_OPTI92X_CS4231 is not set
+# CONFIG_SND_OPTI93X is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_PCM=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_PDAUDIOCF is not set
+CONFIG_SND_RAWMIDI=m
+# CONFIG_SND_RIPTIDE is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_RTCTIMER is not set
+# CONFIG_SND_SB16 is not set
+# CONFIG_SND_SB8 is not set
+# CONFIG_SND_SBAWE is not set
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_SGALAXY is not set
+# CONFIG_SND_SOC is not set
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_SSCAPE is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_TIMER=m
+# CONFIG_SND_TRIDENT is not set
+CONFIG_SND_USB_AUDIO=m
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_USB_USX2Y is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VIA82XX is not set
+# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VX222 is not set
+# CONFIG_SND_VXPOCKET is not set
+# CONFIG_SND_WAVEFRONT is not set
+# CONFIG_SND_YMFPCI is not set
+# CONFIG_SNI_RM is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+CONFIG_SOUND=m
+# CONFIG_SOUND_PRIME is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_SQUASHFS_VMALLOC is not set
+# CONFIG_SSFDC is not set
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_STANDALONE=y
+# CONFIG_STRIP is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_SUNGEM is not set
+CONFIG_SUNRPC=m
+# CONFIG_SUNRPC_BIND34 is not set
+CONFIG_SUNRPC_GSS=m
+# CONFIG_SUN_PARTITION is not set
+CONFIG_SWAP=y
+# CONFIG_SYNCLINK_CS is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_SYSCTL=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_SYSFS=y
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_SYSVIPC=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=m
+CONFIG_TCP_CONG_CUBIC=m
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_VEGAS=y
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_WESTWOOD=m
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_BM=m
+CONFIG_TEXTSEARCH_FSM=m
+CONFIG_TEXTSEARCH_KMP=m
+# CONFIG_TIFM_CORE is not set
+# CONFIG_TIGON3 is not set
+CONFIG_TIMERFD=y
+# CONFIG_TINY_SHMEM is not set
+# CONFIG_TIPC is not set
+# CONFIG_TLAN is not set
+CONFIG_TMD_HERMES=m
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_TR is not set
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_TUN=m
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+# CONFIG_UFS_FS is not set
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_UNIX=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_UNIXWARE_DISKLABEL is not set
+CONFIG_USB=m
+# CONFIG_USB_ACECAD is not set
+CONFIG_USB_ACM=m
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_AIPTEK is not set
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_APPLETOUCH is not set
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARMLINUX=y
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_ATI_REMOTE2 is not set
+CONFIG_USB_ATM=m
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_BANDWIDTH is not set
+CONFIG_USB_BELKIN=y
+# CONFIG_USB_BERRY_CHARGE is not set
+CONFIG_USB_CATC=m
+# CONFIG_USB_CXACRU is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_DABUSB is not set
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EPSON2888 is not set
+CONFIG_USB_EZUSB=y
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_GADGET is not set
+CONFIG_USB_HID=m
+# CONFIG_HID_FF is not set
+# CONFIG_USB_HIDDEV is not set
+CONFIG_USB_HIDINPUT=y
+# CONFIG_USB_HIDINPUT_POWERBOOK is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_KAWETH=m
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_KC2190 is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LIBUSUAL is not set
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_MOUSE is not set
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_CDCETHER=m
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_ZAURUS=m
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+# CONFIG_USB_OHCI_HCD is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_OTG is not set
+CONFIG_USB_PEGASUS=m
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_POWERMATE is not set
+CONFIG_USB_PRINTER=m
+# CONFIG_USB_RIO500 is not set
+CONFIG_USB_RTL8150=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_AIRPRIME=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CP2101=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+# CONFIG_USB_SERIAL_DEBUG is not set
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_FUNSOFT=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_HP4X=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_XIRCOM=m
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_SL811_HCD is not set
+CONFIG_USB_SPEEDTOUCH=m
+CONFIG_USB_STORAGE=m
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_DATAFAB=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_DPCM=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_KARMA=y
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_USBAT=y
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_TOUCHSCREEN is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+CONFIG_USB_UEAGLEATM=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_USBNET_MII=m
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_XUSBATM is not set
+CONFIG_USB_YEALINK=m
+CONFIG_USB_ZD1201=m
+# CONFIG_UTS_NS is not set
+CONFIG_VFAT_FS=m
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_CAFE_CCIC is not set
+# CONFIG_VIDEO_CAPTURE_DRIVERS is not set
+# CONFIG_VIDEO_CS53L32A is not set
+# CONFIG_VIDEO_CX2341X is not set
+# CONFIG_VIDEO_CX25840 is not set
+# CONFIG_VIDEO_CX88 is not set
+CONFIG_VIDEO_DEV=m
+# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
+# CONFIG_VIDEO_HEXIUM_GEMINI is not set
+# CONFIG_VIDEO_HEXIUM_ORION is not set
+# CONFIG_VIDEO_MSP3400 is not set
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_PVRUSB2 is not set
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_VIDEO_SAA711X is not set
+# CONFIG_VIDEO_SAA7127 is not set
+# CONFIG_VIDEO_SAA7134 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_TVP5150 is not set
+# CONFIG_VIDEO_UPD64031A is not set
+# CONFIG_VIDEO_UPD64083 is not set
+# CONFIG_VIDEO_USBVISION is not set
+# CONFIG_VIDEO_V4L1 is not set
+CONFIG_VIDEO_V4L1_COMPAT=y
+CONFIG_VIDEO_V4L2=y
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_WM8739 is not set
+# CONFIG_VIDEO_WM8775 is not set
+# CONFIG_VITESSE_PHY is not set
+CONFIG_VLAN_8021Q=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_VT is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_W1 is not set
+# CONFIG_WAN is not set
+# CONFIG_WAN_ROUTER is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+# CONFIG_WDTPCI is not set
+CONFIG_WIRELESS_EXT=y
+CONFIG_WLAN_80211=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WR_PPMC is not set
+# CONFIG_X25 is not set
+CONFIG_XFRM=y
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+CONFIG_XFRM_USER=m
+CONFIG_XFS_FS=m
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_SECURITY is not set
+# CONFIG_YAFFS_FS is not set
+# CONFIG_YAM is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_YENTA=m
+# CONFIG_YENTA_O2 is not set
+# CONFIG_YENTA_RICOH is not set
+# CONFIG_YENTA_TI is not set
+# CONFIG_YENTA_TOSHIBA is not set
+CONFIG_ZD1211RW=m
+# CONFIG_ZD1211RW_DEBUG is not set
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=m
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZONE_DMA=y
+CONFIG_ZONE_DMA_FLAG=1
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/001-squashfs.patch
@@ -1,1 +1,4169 @@
+diff -urN linux-2.6.21.1.old/fs/Kconfig linux-2.6.21.1.dev/fs/Kconfig
+--- linux-2.6.21.1.old/fs/Kconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/fs/Kconfig	2007-05-26 19:00:37.121351760 +0200
+@@ -1367,6 +1367,71 @@
+ 
+ 	  If unsure, say N.
+ 
++config SQUASHFS
++	tristate "SquashFS 3.0 - Squashed file system support"
++	select ZLIB_INFLATE
++	help
++	  Saying Y here includes support for SquashFS 3.0 (a Compressed Read-Only File
++	  System).  Squashfs is a highly compressed read-only filesystem for Linux.
++	  It uses zlib compression to compress both files, inodes and directories.
++	  Inodes in the system are very small and all blocks are packed to minimise
++	  data overhead. Block sizes greater than 4K are supported up to a maximum of 64K.
++	  SquashFS 3.0 supports 64 bit filesystems and files (larger than 4GB), full
++	  uid/gid information, hard links and timestamps.
++
++	  Squashfs is intended for general read-only filesystem use, for archival
++	  use (i.e. in cases where a .tar.gz file may be used), and in embedded
++	  systems where low overhead is needed.  Further information and filesystem tools
++	  are available from http://squashfs.sourceforge.net.
++
++	  If you want to compile this as a module ( = code which can be
++	  inserted in and removed from the running kernel whenever you want),
++	  say M here and read <file:Documentation/modules.txt>.  The module
++	  will be called squashfs.  Note that the root file system (the one
++	  containing the directory /) cannot be compiled as a module.
++
++	  If unsure, say N.
++
++config SQUASHFS_EMBEDDED
++
++	bool "Additional options for memory-constrained systems"
++	depends on SQUASHFS
++	default n
++	help
++	  Saying Y here allows you to specify cache sizes and how Squashfs
++	  allocates memory.  This is only intended for memory constrained
++	  systems.
++
++	  If unsure, say N.
++
++config SQUASHFS_FRAGMENT_CACHE_SIZE
++	int "Number of fragments cached" if SQUASHFS_EMBEDDED
++	depends on SQUASHFS
++	default "3"
++	help
++	  By default SquashFS caches the last 3 fragments read from
++	  the filesystem.  Increasing this amount may mean SquashFS
++	  has to re-read fragments less often from disk, at the expense
++	  of extra system memory.  Decreasing this amount will mean
++	  SquashFS uses less memory at the expense of extra reads from disk.
++
++	  Note there must be at least one cached fragment.  Anything
++	  much more than three will probably not make much difference.
++
++config SQUASHFS_VMALLOC
++	bool "Use Vmalloc rather than Kmalloc" if SQUASHFS_EMBEDDED
++	depends on SQUASHFS
++	default n
++	help
++	  By default SquashFS uses kmalloc to obtain fragment cache memory.
++	  Kmalloc memory is the standard kernel allocator, but it can fail
++	  on memory constrained systems.  Because of the way Vmalloc works,
++	  Vmalloc can succeed when kmalloc fails.  Specifying this option
++	  will make SquashFS always use Vmalloc to allocate the
++	  fragment cache memory.
++
++	  If unsure, say N.
++
+ config VXFS_FS
+ 	tristate "FreeVxFS file system support (VERITAS VxFS(TM) compatible)"
+ 	depends on BLOCK
+diff -urN linux-2.6.21.1.old/fs/Makefile linux-2.6.21.1.dev/fs/Makefile
+--- linux-2.6.21.1.old/fs/Makefile	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/fs/Makefile	2007-05-26 19:00:37.121351760 +0200
+@@ -72,6 +72,7 @@
+ obj-$(CONFIG_JBD2)		+= jbd2/
+ obj-$(CONFIG_EXT2_FS)		+= ext2/
+ obj-$(CONFIG_CRAMFS)		+= cramfs/
++obj-$(CONFIG_SQUASHFS)		+= squashfs/
+ obj-$(CONFIG_RAMFS)		+= ramfs/
+ obj-$(CONFIG_HUGETLBFS)		+= hugetlbfs/
+ obj-$(CONFIG_CODA_FS)		+= coda/
+diff -urN linux-2.6.21.1.old/fs/squashfs/inode.c linux-2.6.21.1.dev/fs/squashfs/inode.c
+--- linux-2.6.21.1.old/fs/squashfs/inode.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/squashfs/inode.c	2007-05-26 19:00:37.123351456 +0200
+@@ -0,0 +1,2122 @@
++/*
++ * Squashfs - a compressed read only filesystem for Linux
++ *
++ * Copyright (c) 2002, 2003, 2004, 2005, 2006
++ * Phillip Lougher <phillip@lougher.org.uk>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2,
++ * or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * inode.c
++ */
++
++#include <linux/types.h>
++#include <linux/squashfs_fs.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/slab.h>
++#include <linux/fs.h>
++#include <linux/smp_lock.h>
++#include <linux/slab.h>
++#include <linux/squashfs_fs_sb.h>
++#include <linux/squashfs_fs_i.h>
++#include <linux/buffer_head.h>
++#include <linux/vfs.h>
++#include <linux/init.h>
++#include <linux/dcache.h>
++#include <linux/wait.h>
++#include <linux/zlib.h>
++#include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <asm/uaccess.h>
++#include <asm/semaphore.h>
++
++#include "squashfs.h"
++
++static void squashfs_put_super(struct super_block *);
++static int squashfs_statfs(struct dentry *, struct kstatfs *);
++static int squashfs_symlink_readpage(struct file *file, struct page *page);
++static int squashfs_readpage(struct file *file, struct page *page);
++static int squashfs_readpage4K(struct file *file, struct page *page);
++static int squashfs_readdir(struct file *, void *, filldir_t);
++static struct inode *squashfs_alloc_inode(struct super_block *sb);
++static void squashfs_destroy_inode(struct inode *inode);
++static int init_inodecache(void);
++static void destroy_inodecache(void);
++static struct dentry *squashfs_lookup(struct inode *, struct dentry *,
++				struct nameidata *);
++static struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode);
++static long long read_blocklist(struct inode *inode, int index,
++				int readahead_blks, char *block_list,
++				unsigned short **block_p, unsigned int *bsize);
++static int squashfs_get_sb(struct file_system_type *, int,
++			const char *, void *, struct vfsmount *);
++
++
++static z_stream stream;
++
++static struct file_system_type squashfs_fs_type = {
++	.owner = THIS_MODULE,
++	.name = "squashfs",
++	.get_sb = squashfs_get_sb,
++	.kill_sb = kill_block_super,
++	.fs_flags = FS_REQUIRES_DEV
++};
++
++static unsigned char squashfs_filetype_table[] = {
++	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
++};
++
++static struct super_operations squashfs_ops = {
++	.alloc_inode = squashfs_alloc_inode,
++	.destroy_inode = squashfs_destroy_inode,
++	.statfs = squashfs_statfs,
++	.put_super = squashfs_put_super,
++};
++
++SQSH_EXTERN struct address_space_operations squashfs_symlink_aops = {
++	.readpage = squashfs_symlink_readpage
++};
++
++SQSH_EXTERN struct address_space_operations squashfs_aops = {
++	.readpage = squashfs_readpage
++};
++
++SQSH_EXTERN struct address_space_operations squashfs_aops_4K = {
++	.readpage = squashfs_readpage4K
++};
++
++static struct file_operations squashfs_dir_ops = {
++	.read = generic_read_dir,
++	.readdir = squashfs_readdir
++};
++
++SQSH_EXTERN struct inode_operations squashfs_dir_inode_ops = {
++	.lookup = squashfs_lookup
++};
++
++
++static struct buffer_head *get_block_length(struct super_block *s,
++				int *cur_index, int *offset, int *c_byte)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	unsigned short temp;
++	struct buffer_head *bh;
++
++	if (!(bh = sb_bread(s, *cur_index)))
++		goto out;
++
++	if (msblk->devblksize - *offset == 1) {
++		if (msblk->swap)
++			((unsigned char *) &temp)[1] = *((unsigned char *)
++				(bh->b_data + *offset));
++		else
++			((unsigned char *) &temp)[0] = *((unsigned char *)
++				(bh->b_data + *offset));
++		brelse(bh);
++		if (!(bh = sb_bread(s, ++(*cur_index))))
++			goto out;
++		if (msblk->swap)
++			((unsigned char *) &temp)[0] = *((unsigned char *)
++				bh->b_data);
++		else
++			((unsigned char *) &temp)[1] = *((unsigned char *)
++				bh->b_data);
++		*c_byte = temp;
++		*offset = 1;
++	} else {
++		if (msblk->swap) {
++			((unsigned char *) &temp)[1] = *((unsigned char *)
++				(bh->b_data + *offset));
++			((unsigned char *) &temp)[0] = *((unsigned char *)
++				(bh->b_data + *offset + 1));
++		} else {
++			((unsigned char *) &temp)[0] = *((unsigned char *)
++				(bh->b_data + *offset));
++			((unsigned char *) &temp)[1] = *((unsigned char *)
++				(bh->b_data + *offset + 1));
++		}
++		*c_byte = temp;
++		*offset += 2;
++	}
++
++	if (SQUASHFS_CHECK_DATA(msblk->sblk.flags)) {
++		if (*offset == msblk->devblksize) {
++			brelse(bh);
++			if (!(bh = sb_bread(s, ++(*cur_index))))
++				goto out;
++			*offset = 0;
++		}
++		if (*((unsigned char *) (bh->b_data + *offset)) !=
++						SQUASHFS_MARKER_BYTE) {
++			ERROR("Metadata block marker corrupt @ %x\n",
++						*cur_index);
++			brelse(bh);
++			goto out;
++		}
++		(*offset)++;
++	}
++	return bh;
++
++out:
++	return NULL;
++}
++
++
++SQSH_EXTERN unsigned int squashfs_read_data(struct super_block *s, char *buffer,
++			long long index, unsigned int length,
++			long long *next_index)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct buffer_head *bh[((SQUASHFS_FILE_MAX_SIZE - 1) >>
++			msblk->devblksize_log2) + 2];
++	unsigned int offset = index & ((1 << msblk->devblksize_log2) - 1);
++	unsigned int cur_index = index >> msblk->devblksize_log2;
++	int bytes, avail_bytes, b = 0, k;
++	char *c_buffer;
++	unsigned int compressed;
++	unsigned int c_byte = length;
++
++	if (c_byte) {
++		bytes = msblk->devblksize - offset;
++		compressed = SQUASHFS_COMPRESSED_BLOCK(c_byte);
++		c_buffer = compressed ? msblk->read_data : buffer;
++		c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);
++
++		TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
++					? "" : "un", (unsigned int) c_byte);
++
++		if (!(bh[0] = sb_getblk(s, cur_index)))
++			goto block_release;
++
++		for (b = 1; bytes < c_byte; b++) {
++			if (!(bh[b] = sb_getblk(s, ++cur_index)))
++				goto block_release;
++			bytes += msblk->devblksize;
++		}
++		ll_rw_block(READ, b, bh);
++	} else {
++		if (!(bh[0] = get_block_length(s, &cur_index, &offset,
++								&c_byte)))
++			goto read_failure;
++
++		bytes = msblk->devblksize - offset;
++		compressed = SQUASHFS_COMPRESSED(c_byte);
++		c_buffer = compressed ? msblk->read_data : buffer;
++		c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
++
++		TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
++					? "" : "un", (unsigned int) c_byte);
++
++		for (b = 1; bytes < c_byte; b++) {
++			if (!(bh[b] = sb_getblk(s, ++cur_index)))
++				goto block_release;
++			bytes += msblk->devblksize;
++		}
++		ll_rw_block(READ, b - 1, bh + 1);
++	}
++
++	if (compressed)
++		down(&msblk->read_data_mutex);
++
++	for (bytes = 0, k = 0; k < b; k++) {
++		avail_bytes = (c_byte - bytes) > (msblk->devblksize - offset) ?
++					msblk->devblksize - offset :
++					c_byte - bytes;
++		wait_on_buffer(bh[k]);
++		if (!buffer_uptodate(bh[k]))
++			goto block_release;
++		memcpy(c_buffer + bytes, bh[k]->b_data + offset, avail_bytes);
++		bytes += avail_bytes;
++		offset = 0;
++		brelse(bh[k]);
++	}
++
++	/*
++	 * uncompress block
++	 */
++	if (compressed) {
++		int zlib_err;
++
++		stream.next_in = c_buffer;
++		stream.avail_in = c_byte;
++		stream.next_out = buffer;
++		stream.avail_out = msblk->read_size;
++
++		if (((zlib_err = zlib_inflateInit(&stream)) != Z_OK) ||
++				((zlib_err = zlib_inflate(&stream, Z_FINISH))
++				 != Z_STREAM_END) || ((zlib_err =
++				zlib_inflateEnd(&stream)) != Z_OK)) {
++			ERROR("zlib_fs returned unexpected result 0x%x\n",
++				zlib_err);
++			bytes = 0;
++		} else
++			bytes = stream.total_out;
++
++		up(&msblk->read_data_mutex);
++	}
++
++	if (next_index)
++		*next_index = index + c_byte + (length ? 0 :
++				(SQUASHFS_CHECK_DATA(msblk->sblk.flags)
++				 ? 3 : 2));
++	return bytes;
++
++block_release:
++	while (--b >= 0)
++		brelse(bh[b]);
++
++read_failure:
++	ERROR("sb_bread failed reading block 0x%x\n", cur_index);
++	return 0;
++}
++
++
++SQSH_EXTERN int squashfs_get_cached_block(struct super_block *s, char *buffer,
++				long long block, unsigned int offset,
++				int length, long long *next_block,
++				unsigned int *next_offset)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	int n, i, bytes, return_length = length;
++	long long next_index;
++
++	TRACE("Entered squashfs_get_cached_block [%llx:%x]\n", block, offset);
++
++	while ( 1 ) {
++		for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
++			if (msblk->block_cache[i].block == block)
++				break;
++
++		down(&msblk->block_cache_mutex);
++
++		if (i == SQUASHFS_CACHED_BLKS) {
++			/* read inode header block */
++			for (i = msblk->next_cache, n = SQUASHFS_CACHED_BLKS;
++					n ; n --, i = (i + 1) %
++					SQUASHFS_CACHED_BLKS)
++				if (msblk->block_cache[i].block !=
++							SQUASHFS_USED_BLK)
++					break;
++
++			if (n == 0) {
++				wait_queue_t wait;
++
++				init_waitqueue_entry(&wait, current);
++				add_wait_queue(&msblk->waitq, &wait);
++				set_current_state(TASK_UNINTERRUPTIBLE);
++ 				up(&msblk->block_cache_mutex);
++				schedule();
++				set_current_state(TASK_RUNNING);
++				remove_wait_queue(&msblk->waitq, &wait);
++				continue;
++			}
++			msblk->next_cache = (i + 1) % SQUASHFS_CACHED_BLKS;
++
++			if (msblk->block_cache[i].block ==
++							SQUASHFS_INVALID_BLK) {
++				if (!(msblk->block_cache[i].data =
++						kmalloc(SQUASHFS_METADATA_SIZE,
++						GFP_KERNEL))) {
++					ERROR("Failed to allocate cache"
++							"block\n");
++					up(&msblk->block_cache_mutex);
++					goto out;
++				}
++			}
++
++			msblk->block_cache[i].block = SQUASHFS_USED_BLK;
++			up(&msblk->block_cache_mutex);
++
++			if (!(msblk->block_cache[i].length =
++						squashfs_read_data(s,
++						msblk->block_cache[i].data,
++						block, 0, &next_index))) {
++				ERROR("Unable to read cache block [%llx:%x]\n",
++						block, offset);
++				goto out;
++			}
++
++			down(&msblk->block_cache_mutex);
++			wake_up(&msblk->waitq);
++			msblk->block_cache[i].block = block;
++			msblk->block_cache[i].next_index = next_index;
++			TRACE("Read cache block [%llx:%x]\n", block, offset);
++		}
++
++		if (msblk->block_cache[i].block != block) {
++			up(&msblk->block_cache_mutex);
++			continue;
++		}
++
++		if ((bytes = msblk->block_cache[i].length - offset) >= length) {
++			if (buffer)
++				memcpy(buffer, msblk->block_cache[i].data +
++						offset, length);
++			if (msblk->block_cache[i].length - offset == length) {
++				*next_block = msblk->block_cache[i].next_index;
++				*next_offset = 0;
++			} else {
++				*next_block = block;
++				*next_offset = offset + length;
++			}
++			up(&msblk->block_cache_mutex);
++			goto finish;
++		} else {
++			if (buffer) {
++				memcpy(buffer, msblk->block_cache[i].data +
++						offset, bytes);
++				buffer += bytes;
++			}
++			block = msblk->block_cache[i].next_index;
++			up(&msblk->block_cache_mutex);
++			length -= bytes;
++			offset = 0;
++		}
++	}
++
++finish:
++	return return_length;
++out:
++	return 0;
++}
++
++
++static int get_fragment_location(struct super_block *s, unsigned int fragment,
++				long long *fragment_start_block,
++				unsigned int *fragment_size)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	long long start_block =
++		msblk->fragment_index[SQUASHFS_FRAGMENT_INDEX(fragment)];
++	int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
++	struct squashfs_fragment_entry fragment_entry;
++
++	if (msblk->swap) {
++		struct squashfs_fragment_entry sfragment_entry;
++
++		if (!squashfs_get_cached_block(s, (char *) &sfragment_entry,
++					start_block, offset,
++					sizeof(sfragment_entry), &start_block,
++					&offset))
++			goto out;
++		SQUASHFS_SWAP_FRAGMENT_ENTRY(&fragment_entry, &sfragment_entry);
++	} else
++		if (!squashfs_get_cached_block(s, (char *) &fragment_entry,
++					start_block, offset,
++					sizeof(fragment_entry), &start_block,
++					&offset))
++			goto out;
++
++	*fragment_start_block = fragment_entry.start_block;
++	*fragment_size = fragment_entry.size;
++
++	return 1;
++
++out:
++	return 0;
++}
++
++
++SQSH_EXTERN void release_cached_fragment(struct squashfs_sb_info *msblk, struct
++					squashfs_fragment_cache *fragment)
++{
++	down(&msblk->fragment_mutex);
++	fragment->locked --;
++	wake_up(&msblk->fragment_wait_queue);
++	up(&msblk->fragment_mutex);
++}
++
++
++SQSH_EXTERN struct squashfs_fragment_cache *get_cached_fragment(struct super_block
++					*s, long long start_block,
++					int length)
++{
++	int i, n;
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++
++	while ( 1 ) {
++		down(&msblk->fragment_mutex);
++
++		for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS &&
++				msblk->fragment[i].block != start_block; i++);
++
++		if (i == SQUASHFS_CACHED_FRAGMENTS) {
++			for (i = msblk->next_fragment, n =
++				SQUASHFS_CACHED_FRAGMENTS; n &&
++				msblk->fragment[i].locked; n--, i = (i + 1) %
++				SQUASHFS_CACHED_FRAGMENTS);
++
++			if (n == 0) {
++				wait_queue_t wait;
++
++				init_waitqueue_entry(&wait, current);
++				add_wait_queue(&msblk->fragment_wait_queue,
++									&wait);
++				set_current_state(TASK_UNINTERRUPTIBLE);
++				up(&msblk->fragment_mutex);
++				schedule();
++				set_current_state(TASK_RUNNING);
++				remove_wait_queue(&msblk->fragment_wait_queue,
++									&wait);
++				continue;
++			}
++			msblk->next_fragment = (msblk->next_fragment + 1) %
++				SQUASHFS_CACHED_FRAGMENTS;
++
++			if (msblk->fragment[i].data == NULL)
++				if (!(msblk->fragment[i].data = SQUASHFS_ALLOC
++						(SQUASHFS_FILE_MAX_SIZE))) {
++					ERROR("Failed to allocate fragment "
++							"cache block\n");
++					up(&msblk->fragment_mutex);
++					goto out;
++				}
++
++			msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
++			msblk->fragment[i].locked = 1;
++			up(&msblk->fragment_mutex);
++
++			if (!(msblk->fragment[i].length = squashfs_read_data(s,
++						msblk->fragment[i].data,
++						start_block, length, NULL))) {
++				ERROR("Unable to read fragment cache block "
++							"[%llx]\n", start_block);
++				msblk->fragment[i].locked = 0;
++				goto out;
++			}
++
++			msblk->fragment[i].block = start_block;
++			TRACE("New fragment %d, start block %lld, locked %d\n",
++						i, msblk->fragment[i].block,
++						msblk->fragment[i].locked);
++			break;
++		}
++
++		msblk->fragment[i].locked++;
++		up(&msblk->fragment_mutex);
++		TRACE("Got fragment %d, start block %lld, locked %d\n", i,
++						msblk->fragment[i].block,
++						msblk->fragment[i].locked);
++		break;
++	}
++
++	return &msblk->fragment[i];
++
++out:
++	return NULL;
++}
++
++
++static struct inode *squashfs_new_inode(struct super_block *s,
++		struct squashfs_base_inode_header *inodeb)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct inode *i = new_inode(s);
++
++	if (i) {
++		i->i_ino = inodeb->inode_number;
++		i->i_mtime.tv_sec = inodeb->mtime;
++		i->i_atime.tv_sec = inodeb->mtime;
++		i->i_ctime.tv_sec = inodeb->mtime;
++		i->i_uid = msblk->uid[inodeb->uid];
++		i->i_mode = inodeb->mode;
++		i->i_size = 0;
++		if (inodeb->guid == SQUASHFS_GUIDS)
++			i->i_gid = i->i_uid;
++		else
++			i->i_gid = msblk->guid[inodeb->guid];
++	}
++
++	return i;
++}
++
++
++static struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode)
++{
++	struct inode *i;
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	long long block = SQUASHFS_INODE_BLK(inode) +
++		sblk->inode_table_start;
++	unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
++	long long next_block;
++	unsigned int next_offset;
++	union squashfs_inode_header id, sid;
++	struct squashfs_base_inode_header *inodeb = &id.base,
++					  *sinodeb = &sid.base;
++
++	TRACE("Entered squashfs_iget\n");
++
++	if (msblk->swap) {
++		if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
++					offset, sizeof(*sinodeb), &next_block,
++					&next_offset))
++			goto failed_read;
++		SQUASHFS_SWAP_BASE_INODE_HEADER(inodeb, sinodeb,
++					sizeof(*sinodeb));
++	} else
++		if (!squashfs_get_cached_block(s, (char *) inodeb, block,
++					offset, sizeof(*inodeb), &next_block,
++					&next_offset))
++			goto failed_read;
++
++	switch(inodeb->inode_type) {
++		case SQUASHFS_FILE_TYPE: {
++			unsigned int frag_size;
++			long long frag_blk;
++			struct squashfs_reg_inode_header *inodep = &id.reg;
++			struct squashfs_reg_inode_header *sinodep = &sid.reg;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_REG_INODE_HEADER(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			frag_blk = SQUASHFS_INVALID_BLK;
++			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
++					!get_fragment_location(s,
++					inodep->fragment, &frag_blk, &frag_size))
++				goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb)) == NULL)
++				goto failed_read1;
++
++			i->i_nlink = 1;
++			i->i_size = inodep->file_size;
++			i->i_fop = &generic_ro_fops;
++			i->i_mode |= S_IFREG;
++			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
++			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
++			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
++			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
++			SQUASHFS_I(i)->start_block = inodep->start_block;
++			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
++			SQUASHFS_I(i)->offset = next_offset;
++			if (sblk->block_size > 4096)
++				i->i_data.a_ops = &squashfs_aops;
++			else
++				i->i_data.a_ops = &squashfs_aops_4K;
++
++			TRACE("File inode %x:%x, start_block %llx, "
++					"block_list_start %llx, offset %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					inodep->start_block, next_block,
++					next_offset);
++			break;
++		}
++		case SQUASHFS_LREG_TYPE: {
++			unsigned int frag_size;
++			long long frag_blk;
++			struct squashfs_lreg_inode_header *inodep = &id.lreg;
++			struct squashfs_lreg_inode_header *sinodep = &sid.lreg;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_LREG_INODE_HEADER(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			frag_blk = SQUASHFS_INVALID_BLK;
++			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
++					!get_fragment_location(s,
++					inodep->fragment, &frag_blk, &frag_size))
++				goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb)) == NULL)
++				goto failed_read1;
++
++			i->i_nlink = inodep->nlink;
++			i->i_size = inodep->file_size;
++			i->i_fop = &generic_ro_fops;
++			i->i_mode |= S_IFREG;
++			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
++			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
++			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
++			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
++			SQUASHFS_I(i)->start_block = inodep->start_block;
++			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
++			SQUASHFS_I(i)->offset = next_offset;
++			if (sblk->block_size > 4096)
++				i->i_data.a_ops = &squashfs_aops;
++			else
++				i->i_data.a_ops = &squashfs_aops_4K;
++
++			TRACE("File inode %x:%x, start_block %llx, "
++					"block_list_start %llx, offset %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					inodep->start_block, next_block,
++					next_offset);
++			break;
++		}
++		case SQUASHFS_DIR_TYPE: {
++			struct squashfs_dir_inode_header *inodep = &id.dir;
++			struct squashfs_dir_inode_header *sinodep = &sid.dir;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_DIR_INODE_HEADER(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb)) == NULL)
++				goto failed_read1;
++
++			i->i_nlink = inodep->nlink;
++			i->i_size = inodep->file_size;
++			i->i_op = &squashfs_dir_inode_ops;
++			i->i_fop = &squashfs_dir_ops;
++			i->i_mode |= S_IFDIR;
++			SQUASHFS_I(i)->start_block = inodep->start_block;
++			SQUASHFS_I(i)->offset = inodep->offset;
++			SQUASHFS_I(i)->u.s2.directory_index_count = 0;
++			SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
++
++			TRACE("Directory inode %x:%x, start_block %x, offset "
++					"%x\n", SQUASHFS_INODE_BLK(inode),
++					offset, inodep->start_block,
++					inodep->offset);
++			break;
++		}
++		case SQUASHFS_LDIR_TYPE: {
++			struct squashfs_ldir_inode_header *inodep = &id.ldir;
++			struct squashfs_ldir_inode_header *sinodep = &sid.ldir;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_LDIR_INODE_HEADER(inodep,
++						sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb)) == NULL)
++				goto failed_read1;
++
++			i->i_nlink = inodep->nlink;
++			i->i_size = inodep->file_size;
++			i->i_op = &squashfs_dir_inode_ops;
++			i->i_fop = &squashfs_dir_ops;
++			i->i_mode |= S_IFDIR;
++			SQUASHFS_I(i)->start_block = inodep->start_block;
++			SQUASHFS_I(i)->offset = inodep->offset;
++			SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
++			SQUASHFS_I(i)->u.s2.directory_index_offset =
++								next_offset;
++			SQUASHFS_I(i)->u.s2.directory_index_count =
++								inodep->i_count;
++			SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
++
++			TRACE("Long directory inode %x:%x, start_block %x, "
++					"offset %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					inodep->start_block, inodep->offset);
++			break;
++		}
++		case SQUASHFS_SYMLINK_TYPE: {
++			struct squashfs_symlink_inode_header *inodep =
++								&id.symlink;
++			struct squashfs_symlink_inode_header *sinodep =
++								&sid.symlink;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_SYMLINK_INODE_HEADER(inodep,
++								sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb)) == NULL)
++				goto failed_read1;
++
++			i->i_nlink = inodep->nlink;
++			i->i_size = inodep->symlink_size;
++			i->i_op = &page_symlink_inode_operations;
++			i->i_data.a_ops = &squashfs_symlink_aops;
++			i->i_mode |= S_IFLNK;
++			SQUASHFS_I(i)->start_block = next_block;
++			SQUASHFS_I(i)->offset = next_offset;
++
++			TRACE("Symbolic link inode %x:%x, start_block %llx, "
++					"offset %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					next_block, next_offset);
++			break;
++		 }
++		 case SQUASHFS_BLKDEV_TYPE:
++		 case SQUASHFS_CHRDEV_TYPE: {
++			struct squashfs_dev_inode_header *inodep = &id.dev;
++			struct squashfs_dev_inode_header *sinodep = &sid.dev;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_DEV_INODE_HEADER(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if ((i = squashfs_new_inode(s, inodeb)) == NULL)
++				goto failed_read1;
++
++			i->i_nlink = inodep->nlink;
++			i->i_mode |= (inodeb->inode_type ==
++					SQUASHFS_CHRDEV_TYPE) ?  S_IFCHR :
++					S_IFBLK;
++			init_special_inode(i, i->i_mode,
++					old_decode_dev(inodep->rdev));
++
++			TRACE("Device inode %x:%x, rdev %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					inodep->rdev);
++			break;
++		 }
++		 case SQUASHFS_FIFO_TYPE:
++		 case SQUASHFS_SOCKET_TYPE: {
++			struct squashfs_ipc_inode_header *inodep = &id.ipc;
++			struct squashfs_ipc_inode_header *sinodep = &sid.ipc;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_IPC_INODE_HEADER(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if ((i = squashfs_new_inode(s, inodeb)) == NULL)
++				goto failed_read1;
++
++			i->i_nlink = inodep->nlink;
++			i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
++							? S_IFIFO : S_IFSOCK;
++			init_special_inode(i, i->i_mode, 0);
++			break;
++		 }
++		 default:
++			ERROR("Unknown inode type %d in squashfs_iget!\n",
++					inodeb->inode_type);
++			goto failed_read1;
++	}
++
++	insert_inode_hash(i);
++	return i;
++
++failed_read:
++	ERROR("Unable to read inode [%llx:%x]\n", block, offset);
++
++failed_read1:
++	return NULL;
++}
++
++
++static int read_fragment_index_table(struct super_block *s)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++
++	/* Allocate fragment index table */
++	if (!(msblk->fragment_index = kmalloc(SQUASHFS_FRAGMENT_INDEX_BYTES
++					(sblk->fragments), GFP_KERNEL))) {
++		ERROR("Failed to allocate uid/gid table\n");
++		return 0;
++	}
++
++	if (SQUASHFS_FRAGMENT_INDEX_BYTES(sblk->fragments) &&
++					!squashfs_read_data(s, (char *)
++					msblk->fragment_index,
++					sblk->fragment_table_start,
++					SQUASHFS_FRAGMENT_INDEX_BYTES
++					(sblk->fragments) |
++					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
++		ERROR("unable to read fragment index table\n");
++		return 0;
++	}
++
++	if (msblk->swap) {
++		int i;
++		long long fragment;
++
++		for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES(sblk->fragments);
++									i++) {
++			SQUASHFS_SWAP_FRAGMENT_INDEXES((&fragment),
++						&msblk->fragment_index[i], 1);
++			msblk->fragment_index[i] = fragment;
++		}
++	}
++
++	return 1;
++}
++
++
++static int supported_squashfs_filesystem(struct squashfs_sb_info *msblk, int silent)
++{
++	struct squashfs_super_block *sblk = &msblk->sblk;
++
++	msblk->iget = squashfs_iget;
++	msblk->read_blocklist = read_blocklist;
++	msblk->read_fragment_index_table = read_fragment_index_table;
++
++	if (sblk->s_major == 1) {
++		if (!squashfs_1_0_supported(msblk)) {
++			SERROR("Major/Minor mismatch, Squashfs 1.0 filesystems "
++				"are unsupported\n");
++			SERROR("Please recompile with "
++				"Squashfs 1.0 support enabled\n");
++			return 0;
++		}
++	} else if (sblk->s_major == 2) {
++		if (!squashfs_2_0_supported(msblk)) {
++			SERROR("Major/Minor mismatch, Squashfs 2.0 filesystems "
++				"are unsupported\n");
++			SERROR("Please recompile with "
++				"Squashfs 2.0 support enabled\n");
++			return 0;
++		}
++	} else if(sblk->s_major != SQUASHFS_MAJOR || sblk->s_minor >
++			SQUASHFS_MINOR) {
++		SERROR("Major/Minor mismatch, trying to mount newer %d.%d "
++				"filesystem\n", sblk->s_major, sblk->s_minor);
++		SERROR("Please update your kernel\n");
++		return 0;
++	}
++
++	return 1;
++}
++
++
++static int squashfs_fill_super(struct super_block *s, void *data, int silent)
++{
++	struct squashfs_sb_info *msblk;
++	struct squashfs_super_block *sblk;
++	int i;
++	char b[BDEVNAME_SIZE];
++	struct inode *root;
++
++	TRACE("Entered squashfs_read_superblock\n");
++
++	if (!(s->s_fs_info = kmalloc(sizeof(struct squashfs_sb_info),
++						GFP_KERNEL))) {
++		ERROR("Failed to allocate superblock\n");
++		goto failure;
++	}
++	memset(s->s_fs_info, 0, sizeof(struct squashfs_sb_info));
++	msblk = s->s_fs_info;
++	sblk = &msblk->sblk;
++
++	msblk->devblksize = sb_min_blocksize(s, BLOCK_SIZE);
++	msblk->devblksize_log2 = ffz(~msblk->devblksize);
++
++	init_MUTEX(&msblk->read_data_mutex);
++	init_MUTEX(&msblk->read_page_mutex);
++	init_MUTEX(&msblk->block_cache_mutex);
++	init_MUTEX(&msblk->fragment_mutex);
++	init_MUTEX(&msblk->meta_index_mutex);
++
++	init_waitqueue_head(&msblk->waitq);
++	init_waitqueue_head(&msblk->fragment_wait_queue);
++
++	if (!squashfs_read_data(s, (char *) sblk, SQUASHFS_START,
++					sizeof(struct squashfs_super_block) |
++					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
++		SERROR("unable to read superblock\n");
++		goto failed_mount;
++	}
++
++	/* Check it is a SQUASHFS superblock */
++	msblk->swap = 0;
++	if ((s->s_magic = sblk->s_magic) != SQUASHFS_MAGIC) {
++		if (sblk->s_magic == SQUASHFS_MAGIC_SWAP) {
++			struct squashfs_super_block ssblk;
++
++			WARNING("Mounting a different endian SQUASHFS "
++				"filesystem on %s\n", bdevname(s->s_bdev, b));
++
++			SQUASHFS_SWAP_SUPER_BLOCK(&ssblk, sblk);
++			memcpy(sblk, &ssblk, sizeof(struct squashfs_super_block));
++			msblk->swap = 1;
++		} else  {
++			SERROR("Can't find a SQUASHFS superblock on %s\n",
++							bdevname(s->s_bdev, b));
++			goto failed_mount;
++		}
++	}
++
++	/* Check the MAJOR & MINOR versions */
++	if(!supported_squashfs_filesystem(msblk, silent))
++		goto failed_mount;
++
++	TRACE("Found valid superblock on %s\n", bdevname(s->s_bdev, b));
++	TRACE("Inodes are %scompressed\n",
++					SQUASHFS_UNCOMPRESSED_INODES
++					(sblk->flags) ? "un" : "");
++	TRACE("Data is %scompressed\n",
++					SQUASHFS_UNCOMPRESSED_DATA(sblk->flags)
++					? "un" : "");
++	TRACE("Check data is %s present in the filesystem\n",
++					SQUASHFS_CHECK_DATA(sblk->flags) ?
++					"" : "not");
++	TRACE("Filesystem size %lld bytes\n", sblk->bytes_used);
++	TRACE("Block size %d\n", sblk->block_size);
++	TRACE("Number of inodes %d\n", sblk->inodes);
++	if (sblk->s_major > 1)
++		TRACE("Number of fragments %d\n", sblk->fragments);
++	TRACE("Number of uids %d\n", sblk->no_uids);
++	TRACE("Number of gids %d\n", sblk->no_guids);
++	TRACE("sblk->inode_table_start %llx\n", sblk->inode_table_start);
++	TRACE("sblk->directory_table_start %llx\n", sblk->directory_table_start);
++	if (sblk->s_major > 1)
++		TRACE("sblk->fragment_table_start %llx\n",
++					sblk->fragment_table_start);
++	TRACE("sblk->uid_start %llx\n", sblk->uid_start);
++
++	s->s_flags |= MS_RDONLY;
++	s->s_op = &squashfs_ops;
++
++	/* Init inode_table block pointer array */
++	if (!(msblk->block_cache = kmalloc(sizeof(struct squashfs_cache) *
++					SQUASHFS_CACHED_BLKS, GFP_KERNEL))) {
++		ERROR("Failed to allocate block cache\n");
++		goto failed_mount;
++	}
++
++	for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
++		msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
++
++	msblk->next_cache = 0;
++
++	/* Allocate read_data block */
++	msblk->read_size = (sblk->block_size < SQUASHFS_METADATA_SIZE) ?
++					SQUASHFS_METADATA_SIZE :
++					sblk->block_size;
++
++	if (!(msblk->read_data = kmalloc(msblk->read_size, GFP_KERNEL))) {
++		ERROR("Failed to allocate read_data block\n");
++		goto failed_mount;
++	}
++
++	/* Allocate read_page block */
++	if (!(msblk->read_page = kmalloc(sblk->block_size, GFP_KERNEL))) {
++		ERROR("Failed to allocate read_page block\n");
++		goto failed_mount;
++	}
++
++	/* Allocate uid and gid tables */
++	if (!(msblk->uid = kmalloc((sblk->no_uids + sblk->no_guids) *
++					sizeof(unsigned int), GFP_KERNEL))) {
++		ERROR("Failed to allocate uid/gid table\n");
++		goto failed_mount;
++	}
++	msblk->guid = msblk->uid + sblk->no_uids;
++
++	if (msblk->swap) {
++		unsigned int suid[sblk->no_uids + sblk->no_guids];
++
++		if (!squashfs_read_data(s, (char *) &suid, sblk->uid_start,
++					((sblk->no_uids + sblk->no_guids) *
++					 sizeof(unsigned int)) |
++					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
++			ERROR("unable to read uid/gid table\n");
++			goto failed_mount;
++		}
++
++		SQUASHFS_SWAP_DATA(msblk->uid, suid, (sblk->no_uids +
++			sblk->no_guids), (sizeof(unsigned int) * 8));
++	} else
++		if (!squashfs_read_data(s, (char *) msblk->uid, sblk->uid_start,
++					((sblk->no_uids + sblk->no_guids) *
++					 sizeof(unsigned int)) |
++					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
++			ERROR("unable to read uid/gid table\n");
++			goto failed_mount;
++		}
++
++
++	if (sblk->s_major == 1 && squashfs_1_0_supported(msblk))
++		goto allocate_root;
++
++	if (!(msblk->fragment = kmalloc(sizeof(struct squashfs_fragment_cache) *
++				SQUASHFS_CACHED_FRAGMENTS, GFP_KERNEL))) {
++		ERROR("Failed to allocate fragment block cache\n");
++		goto failed_mount;
++	}
++
++	for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) {
++		msblk->fragment[i].locked = 0;
++		msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
++		msblk->fragment[i].data = NULL;
++	}
++
++	msblk->next_fragment = 0;
++
++	/* Allocate fragment index table */
++	if (msblk->read_fragment_index_table(s) == 0)
++		goto failed_mount;
++
++allocate_root:
++	if ((root = (msblk->iget)(s, sblk->root_inode)) == NULL)
++		goto failed_mount;
++
++	if ((s->s_root = d_alloc_root(root)) == NULL) {
++		ERROR("Root inode create failed\n");
++		iput(root);
++		goto failed_mount;
++	}
++
++	TRACE("Leaving squashfs_read_super\n");
++	return 0;
++
++failed_mount:
++	kfree(msblk->fragment_index);
++	kfree(msblk->fragment);
++	kfree(msblk->uid);
++	kfree(msblk->read_page);
++	kfree(msblk->read_data);
++	kfree(msblk->block_cache);
++	kfree(msblk->fragment_index_2);
++	kfree(s->s_fs_info);
++	s->s_fs_info = NULL;
++	return -EINVAL;
++
++failure:
++	return -ENOMEM;
++}
++
++
++static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++	struct squashfs_sb_info *msblk = dentry->d_inode->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++
++	TRACE("Entered squashfs_statfs\n");
++
++	buf->f_type = SQUASHFS_MAGIC;
++	buf->f_bsize = sblk->block_size;
++	buf->f_blocks = ((sblk->bytes_used - 1) >> sblk->block_log) + 1;
++	buf->f_bfree = buf->f_bavail = 0;
++	buf->f_files = sblk->inodes;
++	buf->f_ffree = 0;
++	buf->f_namelen = SQUASHFS_NAME_LEN;
++
++	return 0;
++}
++
++
++static int squashfs_symlink_readpage(struct file *file, struct page *page)
++{
++	struct inode *inode = page->mapping->host;
++	int index = page->index << PAGE_CACHE_SHIFT, length, bytes;
++	long long block = SQUASHFS_I(inode)->start_block;
++	int offset = SQUASHFS_I(inode)->offset;
++	void *pageaddr = kmap(page);
++
++	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
++				"%llx, offset %x\n", page->index,
++				SQUASHFS_I(inode)->start_block,
++				SQUASHFS_I(inode)->offset);
++
++	for (length = 0; length < index; length += bytes) {
++		if (!(bytes = squashfs_get_cached_block(inode->i_sb, NULL,
++				block, offset, PAGE_CACHE_SIZE, &block,
++				&offset))) {
++			ERROR("Unable to read symbolic link [%llx:%x]\n", block,
++					offset);
++			goto skip_read;
++		}
++	}
++
++	if (length != index) {
++		ERROR("(squashfs_symlink_readpage) length != index\n");
++		bytes = 0;
++		goto skip_read;
++	}
++
++	bytes = (i_size_read(inode) - length) > PAGE_CACHE_SIZE ? PAGE_CACHE_SIZE :
++					i_size_read(inode) - length;
++
++	if (!(bytes = squashfs_get_cached_block(inode->i_sb, pageaddr, block,
++					offset, bytes, &block, &offset)))
++		ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset);
++
++skip_read:
++	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
++	kunmap(page);
++	SetPageUptodate(page);
++	unlock_page(page);
++
++	return 0;
++}
++
++
++struct meta_index *locate_meta_index(struct inode *inode, int index, int offset)
++{
++	struct meta_index *meta = NULL;
++	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
++	int i;
++
++	down(&msblk->meta_index_mutex);
++
++	TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
++
++	if(msblk->meta_index == NULL)
++		goto not_allocated;
++
++	for (i = 0; i < SQUASHFS_META_NUMBER; i ++)
++		if (msblk->meta_index[i].inode_number == inode->i_ino &&
++				msblk->meta_index[i].offset >= offset &&
++				msblk->meta_index[i].offset <= index &&
++				msblk->meta_index[i].locked == 0) {
++			TRACE("locate_meta_index: entry %d, offset %d\n", i,
++					msblk->meta_index[i].offset);
++			meta = &msblk->meta_index[i];
++			offset = meta->offset;
++		}
++
++	if (meta)
++		meta->locked = 1;
++
++not_allocated:
++	up(&msblk->meta_index_mutex);
++
++	return meta;
++}
++
++
++struct meta_index *empty_meta_index(struct inode *inode, int offset, int skip)
++{
++	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
++	struct meta_index *meta = NULL;
++	int i;
++
++	down(&msblk->meta_index_mutex);
++
++	TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
++
++	if(msblk->meta_index == NULL) {
++		if (!(msblk->meta_index = kmalloc(sizeof(struct meta_index) *
++					SQUASHFS_META_NUMBER, GFP_KERNEL))) {
++			ERROR("Failed to allocate meta_index\n");
++			goto failed;
++		}
++		for(i = 0; i < SQUASHFS_META_NUMBER; i++) {
++			msblk->meta_index[i].inode_number = 0;
++			msblk->meta_index[i].locked = 0;
++		}
++		msblk->next_meta_index = 0;
++	}
++
++	for(i = SQUASHFS_META_NUMBER; i &&
++			msblk->meta_index[msblk->next_meta_index].locked; i --)
++		msblk->next_meta_index = (msblk->next_meta_index + 1) %
++			SQUASHFS_META_NUMBER;
++
++	if(i == 0) {
++		TRACE("empty_meta_index: failed!\n");
++		goto failed;
++	}
++
++	TRACE("empty_meta_index: returned meta entry %d, %p\n",
++			msblk->next_meta_index,
++			&msblk->meta_index[msblk->next_meta_index]);
++
++	meta = &msblk->meta_index[msblk->next_meta_index];
++	msblk->next_meta_index = (msblk->next_meta_index + 1) %
++			SQUASHFS_META_NUMBER;
++
++	meta->inode_number = inode->i_ino;
++	meta->offset = offset;
++	meta->skip = skip;
++	meta->entries = 0;
++	meta->locked = 1;
++
++failed:
++	up(&msblk->meta_index_mutex);
++	return meta;
++}
++
++
++void release_meta_index(struct inode *inode, struct meta_index *meta)
++{
++	meta->locked = 0;
++}
++
++
++static int read_block_index(struct super_block *s, int blocks, char *block_list,
++		long long *start_block, int *offset)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	unsigned int *block_listp;
++	int block = 0;
++
++	if (msblk->swap) {
++		char sblock_list[blocks << 2];
++
++		if (!squashfs_get_cached_block(s, sblock_list, *start_block,
++				*offset, blocks << 2, start_block, offset)) {
++			ERROR("Unable to read block list [%llx:%x]\n",
++				*start_block, *offset);
++			goto failure;
++		}
++		SQUASHFS_SWAP_INTS(((unsigned int *)block_list),
++				((unsigned int *)sblock_list), blocks);
++	} else
++		if (!squashfs_get_cached_block(s, block_list, *start_block,
++				*offset, blocks << 2, start_block, offset)) {
++			ERROR("Unable to read block list [%llx:%x]\n",
++				*start_block, *offset);
++			goto failure;
++		}
++
++	for (block_listp = (unsigned int *) block_list; blocks;
++				block_listp++, blocks --)
++		block += SQUASHFS_COMPRESSED_SIZE_BLOCK(*block_listp);
++
++	return block;
++
++failure:
++	return -1;
++}
++
++
++#define SIZE 256
++
++static inline int calculate_skip(int blocks) {
++	int skip = (blocks - 1) / ((SQUASHFS_SLOTS * SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES);
++	return skip >= 7 ? 7 : skip + 1;
++}
++
++
++static int get_meta_index(struct inode *inode, int index,
++		long long *index_block, int *index_offset,
++		long long *data_block, char *block_list)
++{
++	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	int skip = calculate_skip(i_size_read(inode) >> sblk->block_log);
++	int offset = 0;
++	struct meta_index *meta;
++	struct meta_entry *meta_entry;
++	long long cur_index_block = SQUASHFS_I(inode)->u.s1.block_list_start;
++	int cur_offset = SQUASHFS_I(inode)->offset;
++	long long cur_data_block = SQUASHFS_I(inode)->start_block;
++	int i;
++
++	index /= SQUASHFS_META_INDEXES * skip;
++
++	while ( offset < index ) {
++		meta = locate_meta_index(inode, index, offset + 1);
++
++		if (meta == NULL) {
++			if ((meta = empty_meta_index(inode, offset + 1,
++							skip)) == NULL)
++				goto all_done;
++		} else {
++			offset = index < meta->offset + meta->entries ? index :
++				meta->offset + meta->entries - 1;
++			meta_entry = &meta->meta_entry[offset - meta->offset];
++			cur_index_block = meta_entry->index_block + sblk->inode_table_start;
++			cur_offset = meta_entry->offset;
++			cur_data_block = meta_entry->data_block;
++			TRACE("get_meta_index: offset %d, meta->offset %d, "
++				"meta->entries %d\n", offset, meta->offset,
++				meta->entries);
++			TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
++				" data_block 0x%llx\n", cur_index_block,
++				cur_offset, cur_data_block);
++		}
++
++		for (i = meta->offset + meta->entries; i <= index &&
++				i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
++			int blocks = skip * SQUASHFS_META_INDEXES;
++
++			while (blocks) {
++				int block = blocks > (SIZE >> 2) ? (SIZE >> 2) :
++					blocks;
++				int res = read_block_index(inode->i_sb, block,
++					block_list, &cur_index_block,
++					&cur_offset);
++
++				if (res == -1)
++					goto failed;
++
++				cur_data_block += res;
++				blocks -= block;
++			}
++
++			meta_entry = &meta->meta_entry[i - meta->offset];
++			meta_entry->index_block = cur_index_block - sblk->inode_table_start;
++			meta_entry->offset = cur_offset;
++			meta_entry->data_block = cur_data_block;
++			meta->entries ++;
++			offset ++;
++		}
++
++		TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
++				meta->offset, meta->entries);
++
++		release_meta_index(inode, meta);
++	}
++
++all_done:
++	*index_block = cur_index_block;
++	*index_offset = cur_offset;
++	*data_block = cur_data_block;
++
++	return offset * SQUASHFS_META_INDEXES * skip;
++
++failed:
++	release_meta_index(inode, meta);
++	return -1;
++}
++
++
++static long long read_blocklist(struct inode *inode, int index,
++				int readahead_blks, char *block_list,
++				unsigned short **block_p, unsigned int *bsize)
++{
++	long long block_ptr;
++	int offset;
++	long long block;
++	int res = get_meta_index(inode, index, &block_ptr, &offset, &block,
++		block_list);
++
++	TRACE("read_blocklist: res %d, index %d, block_ptr 0x%llx, offset"
++		       " 0x%x, block 0x%llx\n", res, index, block_ptr, offset,
++		       block);
++
++	if(res == -1)
++		goto failure;
++
++	index -= res;
++
++	while ( index ) {
++		int blocks = index > (SIZE >> 2) ? (SIZE >> 2) : index;
++		int res = read_block_index(inode->i_sb, blocks, block_list,
++			&block_ptr, &offset);
++		if (res == -1)
++			goto failure;
++		block += res;
++		index -= blocks;
++	}
++
++	if (read_block_index(inode->i_sb, 1, block_list,
++			&block_ptr, &offset) == -1)
++		goto failure;
++	*bsize = *((unsigned int *) block_list);
++
++	return block;
++
++failure:
++	return 0;
++}
++
++
++static int squashfs_readpage(struct file *file, struct page *page)
++{
++	struct inode *inode = page->mapping->host;
++	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	unsigned char block_list[SIZE];
++	long long block;
++	unsigned int bsize, i = 0, bytes = 0, byte_offset = 0;
++	int index = page->index >> (sblk->block_log - PAGE_CACHE_SHIFT);
++ 	void *pageaddr;
++	struct squashfs_fragment_cache *fragment = NULL;
++	char *data_ptr = msblk->read_page;
++
++	int mask = (1 << (sblk->block_log - PAGE_CACHE_SHIFT)) - 1;
++	int start_index = page->index & ~mask;
++	int end_index = start_index | mask;
++
++	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
++					page->index,
++					SQUASHFS_I(inode)->start_block);
++
++	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
++					PAGE_CACHE_SHIFT))
++		goto skip_read;
++
++	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
++					|| index < (i_size_read(inode) >>
++					sblk->block_log)) {
++		if ((block = (msblk->read_blocklist)(inode, index, 1,
++					block_list, NULL, &bsize)) == 0)
++			goto skip_read;
++
++		down(&msblk->read_page_mutex);
++
++		if (!(bytes = squashfs_read_data(inode->i_sb, msblk->read_page,
++					block, bsize, NULL))) {
++			ERROR("Unable to read page, block %llx, size %x\n", block,
++					bsize);
++			up(&msblk->read_page_mutex);
++			goto skip_read;
++		}
++	} else {
++		if ((fragment = get_cached_fragment(inode->i_sb,
++					SQUASHFS_I(inode)->
++					u.s1.fragment_start_block,
++					SQUASHFS_I(inode)->u.s1.fragment_size))
++					== NULL) {
++			ERROR("Unable to read page, block %llx, size %x\n",
++					SQUASHFS_I(inode)->
++					u.s1.fragment_start_block,
++					(int) SQUASHFS_I(inode)->
++					u.s1.fragment_size);
++			goto skip_read;
++		}
++		bytes = SQUASHFS_I(inode)->u.s1.fragment_offset +
++					(i_size_read(inode) & (sblk->block_size
++					- 1));
++		byte_offset = SQUASHFS_I(inode)->u.s1.fragment_offset;
++		data_ptr = fragment->data;
++	}
++
++	for (i = start_index; i <= end_index && byte_offset < bytes;
++					i++, byte_offset += PAGE_CACHE_SIZE) {
++		struct page *push_page;
++		int available_bytes = (bytes - byte_offset) > PAGE_CACHE_SIZE ?
++					PAGE_CACHE_SIZE : bytes - byte_offset;
++
++		TRACE("bytes %d, i %d, byte_offset %d, available_bytes %d\n",
++					bytes, i, byte_offset, available_bytes);
++
++		if (i == page->index)  {
++			pageaddr = kmap_atomic(page, KM_USER0);
++			memcpy(pageaddr, data_ptr + byte_offset,
++					available_bytes);
++			memset(pageaddr + available_bytes, 0,
++					PAGE_CACHE_SIZE - available_bytes);
++			kunmap_atomic(pageaddr, KM_USER0);
++			flush_dcache_page(page);
++			SetPageUptodate(page);
++			unlock_page(page);
++		} else if ((push_page =
++				grab_cache_page_nowait(page->mapping, i))) {
++ 			pageaddr = kmap_atomic(push_page, KM_USER0);
++
++			memcpy(pageaddr, data_ptr + byte_offset,
++					available_bytes);
++			memset(pageaddr + available_bytes, 0,
++					PAGE_CACHE_SIZE - available_bytes);
++			kunmap_atomic(pageaddr, KM_USER0);
++			flush_dcache_page(push_page);
++			SetPageUptodate(push_page);
++			unlock_page(push_page);
++			page_cache_release(push_page);
++		}
++	}
++
++	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
++					|| index < (i_size_read(inode) >>
++					sblk->block_log))
++		up(&msblk->read_page_mutex);
++	else
++		release_cached_fragment(msblk, fragment);
++
++	return 0;
++
++skip_read:
++	pageaddr = kmap_atomic(page, KM_USER0);
++	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
++	kunmap_atomic(pageaddr, KM_USER0);
++	flush_dcache_page(page);
++	SetPageUptodate(page);
++	unlock_page(page);
++
++	return 0;
++}
++
++
++static int squashfs_readpage4K(struct file *file, struct page *page)
++{
++	struct inode *inode = page->mapping->host;
++	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	unsigned char block_list[SIZE];
++	long long block;
++	unsigned int bsize, bytes = 0;
++ 	void *pageaddr;
++
++	TRACE("Entered squashfs_readpage4K, page index %lx, start block %llx\n",
++					page->index,
++					SQUASHFS_I(inode)->start_block);
++
++	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
++					PAGE_CACHE_SHIFT)) {
++		pageaddr = kmap_atomic(page, KM_USER0);
++		goto skip_read;
++	}
++
++	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
++					|| page->index < (i_size_read(inode) >>
++					sblk->block_log)) {
++		block = (msblk->read_blocklist)(inode, page->index, 1,
++					block_list, NULL, &bsize);
++
++		down(&msblk->read_page_mutex);
++		bytes = squashfs_read_data(inode->i_sb, msblk->read_page, block,
++					bsize, NULL);
++		pageaddr = kmap_atomic(page, KM_USER0);
++		if (bytes)
++			memcpy(pageaddr, msblk->read_page, bytes);
++		else
++			ERROR("Unable to read page, block %llx, size %x\n",
++					block, bsize);
++		up(&msblk->read_page_mutex);
++	} else {
++		struct squashfs_fragment_cache *fragment =
++			get_cached_fragment(inode->i_sb,
++					SQUASHFS_I(inode)->
++					u.s1.fragment_start_block,
++					SQUASHFS_I(inode)-> u.s1.fragment_size);
++		pageaddr = kmap_atomic(page, KM_USER0);
++		if (fragment) {
++			bytes = i_size_read(inode) & (sblk->block_size - 1);
++			memcpy(pageaddr, fragment->data + SQUASHFS_I(inode)->
++					u.s1.fragment_offset, bytes);
++			release_cached_fragment(msblk, fragment);
++		} else
++			ERROR("Unable to read page, block %llx, size %x\n",
++					SQUASHFS_I(inode)->
++					u.s1.fragment_start_block, (int)
++					SQUASHFS_I(inode)-> u.s1.fragment_size);
++	}
++
++skip_read:
++	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
++	kunmap_atomic(pageaddr, KM_USER0);
++	flush_dcache_page(page);
++	SetPageUptodate(page);
++	unlock_page(page);
++
++	return 0;
++}
++
++
++static int get_dir_index_using_offset(struct super_block *s, long long
++				*next_block, unsigned int *next_offset,
++				long long index_start,
++				unsigned int index_offset, int i_count,
++				long long f_pos)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	int i, length = 0;
++	struct squashfs_dir_index index;
++
++	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
++					i_count, (unsigned int) f_pos);
++
++	f_pos -= 3;	/* translate past the invented "." and ".." entries */
++	if (f_pos == 0)
++		goto finish;
++
++	for (i = 0; i < i_count; i++) {
++		if (msblk->swap) {
++			struct squashfs_dir_index sindex;
++			squashfs_get_cached_block(s, (char *) &sindex,
++					index_start, index_offset,
++					sizeof(sindex), &index_start,
++					&index_offset);
++			SQUASHFS_SWAP_DIR_INDEX(&index, &sindex);
++		} else
++			squashfs_get_cached_block(s, (char *) &index,
++					index_start, index_offset,
++					sizeof(index), &index_start,
++					&index_offset);
++
++		if (index.index > f_pos)
++			break;
++
++		squashfs_get_cached_block(s, NULL, index_start, index_offset,
++					index.size + 1, &index_start,
++					&index_offset);
++
++		length = index.index;
++		*next_block = index.start_block + sblk->directory_table_start;
++	}
++
++	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
++
++finish:
++	return length + 3;
++}
++
++
++static int get_dir_index_using_name(struct super_block *s, long long
++				*next_block, unsigned int *next_offset,
++				long long index_start,
++				unsigned int index_offset, int i_count,
++				const char *name, int size)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	int i, length = 0;
++	char buffer[sizeof(struct squashfs_dir_index) + SQUASHFS_NAME_LEN + 1];
++	struct squashfs_dir_index *index = (struct squashfs_dir_index *) buffer;
++	char str[SQUASHFS_NAME_LEN + 1];
++
++	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
++
++	strncpy(str, name, size);
++	str[size] = '\0';
++
++	for (i = 0; i < i_count; i++) {
++		if (msblk->swap) {
++			struct squashfs_dir_index sindex;
++			squashfs_get_cached_block(s, (char *) &sindex,
++					index_start, index_offset,
++					sizeof(sindex), &index_start,
++					&index_offset);
++			SQUASHFS_SWAP_DIR_INDEX(index, &sindex);
++		} else
++			squashfs_get_cached_block(s, (char *) index,
++					index_start, index_offset,
++					sizeof(struct squashfs_dir_index),
++					&index_start, &index_offset);
++
++		squashfs_get_cached_block(s, index->name, index_start,
++					index_offset, index->size + 1,
++					&index_start, &index_offset);
++
++		index->name[index->size + 1] = '\0';
++
++		if (strcmp(index->name, str) > 0)
++			break;
++
++		length = index->index;
++		*next_block = index->start_block + sblk->directory_table_start;
++	}
++
++	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
++	return length + 3;
++}
++
++
++static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
++{
++	struct inode *i = file->f_dentry->d_inode;
++	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	long long next_block = SQUASHFS_I(i)->start_block +
++		sblk->directory_table_start;
++	int next_offset = SQUASHFS_I(i)->offset, length = 0, dirs_read = 0,
++		dir_count;
++	struct squashfs_dir_header dirh;
++	char buffer[sizeof(struct squashfs_dir_entry) + SQUASHFS_NAME_LEN + 1];
++	struct squashfs_dir_entry *dire = (struct squashfs_dir_entry *) buffer;
++
++	TRACE("Entered squashfs_readdir [%llx:%x]\n", next_block, next_offset);
++
++	while(file->f_pos < 3) {
++		char *name;
++		int size, i_ino;
++
++		if(file->f_pos == 0) {
++			name = ".";
++			size = 1;
++			i_ino = i->i_ino;
++		} else {
++			name = "..";
++			size = 2;
++			i_ino = SQUASHFS_I(i)->u.s2.parent_inode;
++		}
++		TRACE("Calling filldir(%x, %s, %d, %d, %d, %d)\n",
++				(unsigned int) dirent, name, size, (int)
++				file->f_pos, i_ino,
++				squashfs_filetype_table[1]);
++
++		if (filldir(dirent, name, size,
++				file->f_pos, i_ino,
++				squashfs_filetype_table[1]) < 0) {
++				TRACE("Filldir returned less than 0\n");
++				goto finish;
++		}
++		file->f_pos += size;
++		dirs_read++;
++	}
++
++	length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_start,
++				SQUASHFS_I(i)->u.s2.directory_index_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_count,
++				file->f_pos);
++
++	while (length < i_size_read(i)) {
++		/* read directory header */
++		if (msblk->swap) {
++			struct squashfs_dir_header sdirh;
++
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
++					next_block, next_offset, sizeof(sdirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(sdirh);
++			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
++		} else {
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
++					next_block, next_offset, sizeof(dirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(dirh);
++		}
++
++		dir_count = dirh.count + 1;
++		while (dir_count--) {
++			if (msblk->swap) {
++				struct squashfs_dir_entry sdire;
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						&sdire, next_block, next_offset,
++						sizeof(sdire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(sdire);
++				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
++			} else {
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						dire, next_block, next_offset,
++						sizeof(*dire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(*dire);
++			}
++
++			if (!squashfs_get_cached_block(i->i_sb, dire->name,
++						next_block, next_offset,
++						dire->size + 1, &next_block,
++						&next_offset))
++				goto failed_read;
++
++			length += dire->size + 1;
++
++			if (file->f_pos >= length)
++				continue;
++
++			dire->name[dire->size + 1] = '\0';
++
++			TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d, %d)\n",
++					(unsigned int) dirent, dire->name,
++					dire->size + 1, (int) file->f_pos,
++					dirh.start_block, dire->offset,
++					dirh.inode_number + dire->inode_number,
++					squashfs_filetype_table[dire->type]);
++
++			if (filldir(dirent, dire->name, dire->size + 1,
++					file->f_pos,
++					dirh.inode_number + dire->inode_number,
++					squashfs_filetype_table[dire->type])
++					< 0) {
++				TRACE("Filldir returned less than 0\n");
++				goto finish;
++			}
++			file->f_pos = length;
++			dirs_read++;
++		}
++	}
++
++finish:
++	return dirs_read;
++
++failed_read:
++	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
++		next_offset);
++	return 0;
++}
++
++
++static struct dentry *squashfs_lookup(struct inode *i, struct dentry *dentry,
++				struct nameidata *nd)
++{
++	const unsigned char *name = dentry->d_name.name;
++	int len = dentry->d_name.len;
++	struct inode *inode = NULL;
++	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	long long next_block = SQUASHFS_I(i)->start_block +
++				sblk->directory_table_start;
++	int next_offset = SQUASHFS_I(i)->offset, length = 0,
++				dir_count;
++	struct squashfs_dir_header dirh;
++	char buffer[sizeof(struct squashfs_dir_entry) + SQUASHFS_NAME_LEN];
++	struct squashfs_dir_entry *dire = (struct squashfs_dir_entry *) buffer;
++
++	TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset);
++
++	if (len > SQUASHFS_NAME_LEN)
++		goto exit_loop;
++
++	length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_start,
++				SQUASHFS_I(i)->u.s2.directory_index_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_count, name,
++				len);
++
++	while (length < i_size_read(i)) {
++		/* read directory header */
++		if (msblk->swap) {
++			struct squashfs_dir_header sdirh;
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
++					next_block, next_offset, sizeof(sdirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(sdirh);
++			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
++		} else {
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
++					next_block, next_offset, sizeof(dirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(dirh);
++		}
++
++		dir_count = dirh.count + 1;
++		while (dir_count--) {
++			if (msblk->swap) {
++				struct squashfs_dir_entry sdire;
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						&sdire, next_block,next_offset,
++						sizeof(sdire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(sdire);
++				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
++			} else {
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						dire, next_block,next_offset,
++						sizeof(*dire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(*dire);
++			}
++
++			if (!squashfs_get_cached_block(i->i_sb, dire->name,
++					next_block, next_offset, dire->size + 1,
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += dire->size + 1;
++
++			if (name[0] < dire->name[0])
++				goto exit_loop;
++
++			if ((len == dire->size + 1) && !strncmp(name,
++						dire->name, len)) {
++				squashfs_inode_t ino =
++					SQUASHFS_MKINODE(dirh.start_block,
++					dire->offset);
++
++				TRACE("calling squashfs_iget for directory "
++					"entry %s, inode %x:%x, %d\n", name,
++					dirh.start_block, dire->offset,
++					dirh.inode_number + dire->inode_number);
++
++				inode = (msblk->iget)(i->i_sb, ino);
++
++				goto exit_loop;
++			}
++		}
++	}
++
++exit_loop:
++	d_add(dentry, inode);
++	return ERR_PTR(0);
++
++failed_read:
++	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
++		next_offset);
++	goto exit_loop;
++}
++
++
++static void squashfs_put_super(struct super_block *s)
++{
++	int i;
++
++	if (s->s_fs_info) {
++		struct squashfs_sb_info *sbi = s->s_fs_info;
++		if (sbi->block_cache)
++			for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
++				if (sbi->block_cache[i].block !=
++							SQUASHFS_INVALID_BLK)
++					kfree(sbi->block_cache[i].data);
++		if (sbi->fragment)
++			for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++)
++				SQUASHFS_FREE(sbi->fragment[i].data);
++		kfree(sbi->fragment);
++		kfree(sbi->block_cache);
++		kfree(sbi->read_data);
++		kfree(sbi->read_page);
++		kfree(sbi->uid);
++		kfree(sbi->fragment_index);
++		kfree(sbi->fragment_index_2);
++		kfree(sbi->meta_index);
++		kfree(s->s_fs_info);
++		s->s_fs_info = NULL;
++	}
++}
++
++
++static int squashfs_get_sb(struct file_system_type *fs_type,
++			int flags, const char *dev_name, void *data,
++			struct vfsmount *mnt)
++{
++	return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super, mnt);
++}
++
++
++static int __init init_squashfs_fs(void)
++{
++	int err = init_inodecache();
++	if (err)
++		goto out;
++
++	printk(KERN_INFO "squashfs: version 3.0 (2006/03/15) "
++		"Phillip Lougher\n");
++
++	if (!(stream.workspace = vmalloc(zlib_inflate_workspacesize()))) {
++		ERROR("Failed to allocate zlib workspace\n");
++		destroy_inodecache();
++		err = -ENOMEM;
++		goto out;
++	}
++
++	if ((err = register_filesystem(&squashfs_fs_type))) {
++		vfree(stream.workspace);
++		destroy_inodecache();
++	}
++
++out:
++	return err;
++}
++
++
++static void __exit exit_squashfs_fs(void)
++{
++	vfree(stream.workspace);
++	unregister_filesystem(&squashfs_fs_type);
++	destroy_inodecache();
++}
++
++
++static struct kmem_cache * squashfs_inode_cachep;
++
++
++static struct inode *squashfs_alloc_inode(struct super_block *sb)
++{
++	struct squashfs_inode_info *ei;
++	ei = kmem_cache_alloc(squashfs_inode_cachep, GFP_KERNEL);
++	if (!ei)
++		return NULL;
++	return &ei->vfs_inode;
++}
++
++
++static void squashfs_destroy_inode(struct inode *inode)
++{
++	kmem_cache_free(squashfs_inode_cachep, SQUASHFS_I(inode));
++}
++
++
++static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
++{
++	struct squashfs_inode_info *ei = foo;
++
++	inode_init_once(&ei->vfs_inode);
++}
++
++
++static int __init init_inodecache(void)
++{
++	squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
++	     sizeof(struct squashfs_inode_info),
++	     0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
++	     init_once, NULL);
++	if (squashfs_inode_cachep == NULL)
++		return -ENOMEM;
++	return 0;
++}
++
++
++static void destroy_inodecache(void)
++{
++	kmem_cache_destroy(squashfs_inode_cachep);
++}
++
++
++module_init(init_squashfs_fs);
++module_exit(exit_squashfs_fs);
++MODULE_DESCRIPTION("squashfs, a compressed read-only filesystem");
++MODULE_AUTHOR("Phillip Lougher <phillip@lougher.org.uk>");
++MODULE_LICENSE("GPL");
+diff -urN linux-2.6.21.1.old/fs/squashfs/Makefile linux-2.6.21.1.dev/fs/squashfs/Makefile
+--- linux-2.6.21.1.old/fs/squashfs/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/squashfs/Makefile	2007-05-26 19:00:37.123351456 +0200
+@@ -0,0 +1,7 @@
++#
++# Makefile for the linux squashfs routines.
++#
++
++obj-$(CONFIG_SQUASHFS) += squashfs.o
++squashfs-y += inode.o
++squashfs-y += squashfs2_0.o
+diff -urN linux-2.6.21.1.old/fs/squashfs/squashfs2_0.c linux-2.6.21.1.dev/fs/squashfs/squashfs2_0.c
+--- linux-2.6.21.1.old/fs/squashfs/squashfs2_0.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/squashfs/squashfs2_0.c	2007-05-26 19:00:37.125351152 +0200
+@@ -0,0 +1,758 @@
++/*
++ * Squashfs - a compressed read only filesystem for Linux
++ *
++ * Copyright (c) 2002, 2003, 2004, 2005, 2006
++ * Phillip Lougher <phillip@lougher.org.uk>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2,
++ * or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * squashfs2_0.c
++ */
++
++#include <linux/types.h>
++#include <linux/squashfs_fs.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/slab.h>
++#include <linux/fs.h>
++#include <linux/smp_lock.h>
++#include <linux/slab.h>
++#include <linux/squashfs_fs_sb.h>
++#include <linux/squashfs_fs_i.h>
++#include <linux/buffer_head.h>
++#include <linux/vfs.h>
++#include <linux/init.h>
++#include <linux/dcache.h>
++#include <linux/wait.h>
++#include <linux/zlib.h>
++#include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <asm/uaccess.h>
++#include <asm/semaphore.h>
++
++#include "squashfs.h"
++static int squashfs_readdir_2(struct file *file, void *dirent, filldir_t filldir);
++static struct dentry *squashfs_lookup_2(struct inode *, struct dentry *,
++				struct nameidata *);
++
++static struct file_operations squashfs_dir_ops_2 = {
++	.read = generic_read_dir,
++	.readdir = squashfs_readdir_2
++};
++
++static struct inode_operations squashfs_dir_inode_ops_2 = {
++	.lookup = squashfs_lookup_2
++};
++
++static unsigned char squashfs_filetype_table[] = {
++	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
++};
++
++static int read_fragment_index_table_2(struct super_block *s)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++
++	if (!(msblk->fragment_index_2 = kmalloc(SQUASHFS_FRAGMENT_INDEX_BYTES_2
++					(sblk->fragments), GFP_KERNEL))) {
++		ERROR("Failed to allocate uid/gid table\n");
++		return 0;
++	}
++
++	if (SQUASHFS_FRAGMENT_INDEX_BYTES_2(sblk->fragments) &&
++					!squashfs_read_data(s, (char *)
++					msblk->fragment_index_2,
++					sblk->fragment_table_start,
++					SQUASHFS_FRAGMENT_INDEX_BYTES_2
++					(sblk->fragments) |
++					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
++		ERROR("unable to read fragment index table\n");
++		return 0;
++	}
++
++	if (msblk->swap) {
++		int i;
++		unsigned int fragment;
++
++		for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES_2(sblk->fragments);
++									i++) {
++			SQUASHFS_SWAP_FRAGMENT_INDEXES_2((&fragment),
++						&msblk->fragment_index_2[i], 1);
++			msblk->fragment_index_2[i] = fragment;
++		}
++	}
++
++	return 1;
++}
++
++
++static int get_fragment_location_2(struct super_block *s, unsigned int fragment,
++				long long *fragment_start_block,
++				unsigned int *fragment_size)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	long long start_block =
++		msblk->fragment_index_2[SQUASHFS_FRAGMENT_INDEX_2(fragment)];
++	int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET_2(fragment);
++	struct squashfs_fragment_entry_2 fragment_entry;
++
++	if (msblk->swap) {
++		struct squashfs_fragment_entry_2 sfragment_entry;
++
++		if (!squashfs_get_cached_block(s, (char *) &sfragment_entry,
++					start_block, offset,
++					sizeof(sfragment_entry), &start_block,
++					&offset))
++			goto out;
++		SQUASHFS_SWAP_FRAGMENT_ENTRY_2(&fragment_entry, &sfragment_entry);
++	} else
++		if (!squashfs_get_cached_block(s, (char *) &fragment_entry,
++					start_block, offset,
++					sizeof(fragment_entry), &start_block,
++					&offset))
++			goto out;
++
++	*fragment_start_block = fragment_entry.start_block;
++	*fragment_size = fragment_entry.size;
++
++	return 1;
++
++out:
++	return 0;
++}
++
++
++static struct inode *squashfs_new_inode(struct super_block *s,
++		struct squashfs_base_inode_header_2 *inodeb, unsigned int ino)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	struct inode *i = new_inode(s);
++
++	if (i) {
++		i->i_ino = ino;
++		i->i_mtime.tv_sec = sblk->mkfs_time;
++		i->i_atime.tv_sec = sblk->mkfs_time;
++		i->i_ctime.tv_sec = sblk->mkfs_time;
++		i->i_uid = msblk->uid[inodeb->uid];
++		i->i_mode = inodeb->mode;
++		i->i_nlink = 1;
++		i->i_size = 0;
++		if (inodeb->guid == SQUASHFS_GUIDS)
++			i->i_gid = i->i_uid;
++		else
++			i->i_gid = msblk->guid[inodeb->guid];
++	}
++
++	return i;
++}
++
++
++static struct inode *squashfs_iget_2(struct super_block *s, squashfs_inode_t inode)
++{
++	struct inode *i;
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	unsigned int block = SQUASHFS_INODE_BLK(inode) +
++		sblk->inode_table_start;
++	unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
++	unsigned int ino = SQUASHFS_MK_VFS_INODE(block
++		- sblk->inode_table_start, offset);
++	long long next_block;
++	unsigned int next_offset;
++	union squashfs_inode_header_2 id, sid;
++	struct squashfs_base_inode_header_2 *inodeb = &id.base,
++					  *sinodeb = &sid.base;
++
++	TRACE("Entered squashfs_iget\n");
++
++	if (msblk->swap) {
++		if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
++					offset, sizeof(*sinodeb), &next_block,
++					&next_offset))
++			goto failed_read;
++		SQUASHFS_SWAP_BASE_INODE_HEADER_2(inodeb, sinodeb,
++					sizeof(*sinodeb));
++	} else
++		if (!squashfs_get_cached_block(s, (char *) inodeb, block,
++					offset, sizeof(*inodeb), &next_block,
++					&next_offset))
++			goto failed_read;
++
++	switch(inodeb->inode_type) {
++		case SQUASHFS_FILE_TYPE: {
++			struct squashfs_reg_inode_header_2 *inodep = &id.reg;
++			struct squashfs_reg_inode_header_2 *sinodep = &sid.reg;
++			long long frag_blk;
++			unsigned int frag_size;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_REG_INODE_HEADER_2(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			frag_blk = SQUASHFS_INVALID_BLK;
++			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
++					!get_fragment_location_2(s,
++					inodep->fragment, &frag_blk, &frag_size))
++				goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
++				goto failed_read1;
++
++			i->i_size = inodep->file_size;
++			i->i_fop = &generic_ro_fops;
++			i->i_mode |= S_IFREG;
++			i->i_mtime.tv_sec = inodep->mtime;
++			i->i_atime.tv_sec = inodep->mtime;
++			i->i_ctime.tv_sec = inodep->mtime;
++			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
++			i->i_blksize = PAGE_CACHE_SIZE;
++			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
++			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
++			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
++			SQUASHFS_I(i)->start_block = inodep->start_block;
++			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
++			SQUASHFS_I(i)->offset = next_offset;
++			if (sblk->block_size > 4096)
++				i->i_data.a_ops = &squashfs_aops;
++			else
++				i->i_data.a_ops = &squashfs_aops_4K;
++
++			TRACE("File inode %x:%x, start_block %x, "
++					"block_list_start %llx, offset %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					inodep->start_block, next_block,
++					next_offset);
++			break;
++		}
++		case SQUASHFS_DIR_TYPE: {
++			struct squashfs_dir_inode_header_2 *inodep = &id.dir;
++			struct squashfs_dir_inode_header_2 *sinodep = &sid.dir;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_DIR_INODE_HEADER_2(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
++				goto failed_read1;
++
++			i->i_size = inodep->file_size;
++			i->i_op = &squashfs_dir_inode_ops_2;
++			i->i_fop = &squashfs_dir_ops_2;
++			i->i_mode |= S_IFDIR;
++			i->i_mtime.tv_sec = inodep->mtime;
++			i->i_atime.tv_sec = inodep->mtime;
++			i->i_ctime.tv_sec = inodep->mtime;
++			SQUASHFS_I(i)->start_block = inodep->start_block;
++			SQUASHFS_I(i)->offset = inodep->offset;
++			SQUASHFS_I(i)->u.s2.directory_index_count = 0;
++			SQUASHFS_I(i)->u.s2.parent_inode = 0;
++
++			TRACE("Directory inode %x:%x, start_block %x, offset "
++					"%x\n", SQUASHFS_INODE_BLK(inode),
++					offset, inodep->start_block,
++					inodep->offset);
++			break;
++		}
++		case SQUASHFS_LDIR_TYPE: {
++			struct squashfs_ldir_inode_header_2 *inodep = &id.ldir;
++			struct squashfs_ldir_inode_header_2 *sinodep = &sid.ldir;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_LDIR_INODE_HEADER_2(inodep,
++						sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
++				goto failed_read1;
++
++			i->i_size = inodep->file_size;
++			i->i_op = &squashfs_dir_inode_ops_2;
++			i->i_fop = &squashfs_dir_ops_2;
++			i->i_mode |= S_IFDIR;
++			i->i_mtime.tv_sec = inodep->mtime;
++			i->i_atime.tv_sec = inodep->mtime;
++			i->i_ctime.tv_sec = inodep->mtime;
++			SQUASHFS_I(i)->start_block = inodep->start_block;
++			SQUASHFS_I(i)->offset = inodep->offset;
++			SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
++			SQUASHFS_I(i)->u.s2.directory_index_offset =
++								next_offset;
++			SQUASHFS_I(i)->u.s2.directory_index_count =
++								inodep->i_count;
++			SQUASHFS_I(i)->u.s2.parent_inode = 0;
++
++			TRACE("Long directory inode %x:%x, start_block %x, "
++					"offset %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					inodep->start_block, inodep->offset);
++			break;
++		}
++		case SQUASHFS_SYMLINK_TYPE: {
++			struct squashfs_symlink_inode_header_2 *inodep =
++								&id.symlink;
++			struct squashfs_symlink_inode_header_2 *sinodep =
++								&sid.symlink;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_SYMLINK_INODE_HEADER_2(inodep,
++								sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
++				goto failed_read1;
++
++			i->i_size = inodep->symlink_size;
++			i->i_op = &page_symlink_inode_operations;
++			i->i_data.a_ops = &squashfs_symlink_aops;
++			i->i_mode |= S_IFLNK;
++			SQUASHFS_I(i)->start_block = next_block;
++			SQUASHFS_I(i)->offset = next_offset;
++
++			TRACE("Symbolic link inode %x:%x, start_block %llx, "
++					"offset %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					next_block, next_offset);
++			break;
++		 }
++		 case SQUASHFS_BLKDEV_TYPE:
++		 case SQUASHFS_CHRDEV_TYPE: {
++			struct squashfs_dev_inode_header_2 *inodep = &id.dev;
++			struct squashfs_dev_inode_header_2 *sinodep = &sid.dev;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_DEV_INODE_HEADER_2(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if ((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
++				goto failed_read1;
++
++			i->i_mode |= (inodeb->inode_type ==
++					SQUASHFS_CHRDEV_TYPE) ?  S_IFCHR :
++					S_IFBLK;
++			init_special_inode(i, i->i_mode,
++					old_decode_dev(inodep->rdev));
++
++			TRACE("Device inode %x:%x, rdev %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					inodep->rdev);
++			break;
++		 }
++		 case SQUASHFS_FIFO_TYPE:
++		 case SQUASHFS_SOCKET_TYPE: {
++			if ((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
++				goto failed_read1;
++
++			i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
++							? S_IFIFO : S_IFSOCK;
++			init_special_inode(i, i->i_mode, 0);
++			break;
++		 }
++		 default:
++			ERROR("Unknown inode type %d in squashfs_iget!\n",
++					inodeb->inode_type);
++			goto failed_read1;
++	}
++
++	insert_inode_hash(i);
++	return i;
++
++failed_read:
++	ERROR("Unable to read inode [%x:%x]\n", block, offset);
++
++failed_read1:
++	return NULL;
++}
++
++
++static int get_dir_index_using_offset(struct super_block *s, long long
++				*next_block, unsigned int *next_offset,
++				long long index_start,
++				unsigned int index_offset, int i_count,
++				long long f_pos)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	int i, length = 0;
++	struct squashfs_dir_index_2 index;
++
++	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
++					i_count, (unsigned int) f_pos);
++
++	if (f_pos == 0)
++		goto finish;
++
++	for (i = 0; i < i_count; i++) {
++		if (msblk->swap) {
++			struct squashfs_dir_index_2 sindex;
++			squashfs_get_cached_block(s, (char *) &sindex,
++					index_start, index_offset,
++					sizeof(sindex), &index_start,
++					&index_offset);
++			SQUASHFS_SWAP_DIR_INDEX_2(&index, &sindex);
++		} else
++			squashfs_get_cached_block(s, (char *) &index,
++					index_start, index_offset,
++					sizeof(index), &index_start,
++					&index_offset);
++
++		if (index.index > f_pos)
++			break;
++
++		squashfs_get_cached_block(s, NULL, index_start, index_offset,
++					index.size + 1, &index_start,
++					&index_offset);
++
++		length = index.index;
++		*next_block = index.start_block + sblk->directory_table_start;
++	}
++
++	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
++
++finish:
++	return length;
++}
++
++
++static int get_dir_index_using_name(struct super_block *s, long long
++				*next_block, unsigned int *next_offset,
++				long long index_start,
++				unsigned int index_offset, int i_count,
++				const char *name, int size)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	int i, length = 0;
++	char buffer[sizeof(struct squashfs_dir_index_2) + SQUASHFS_NAME_LEN + 1];
++	struct squashfs_dir_index_2 *index = (struct squashfs_dir_index_2 *) buffer;
++	char str[SQUASHFS_NAME_LEN + 1];
++
++	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
++
++	strncpy(str, name, size);
++	str[size] = '\0';
++
++	for (i = 0; i < i_count; i++) {
++		if (msblk->swap) {
++			struct squashfs_dir_index_2 sindex;
++			squashfs_get_cached_block(s, (char *) &sindex,
++					index_start, index_offset,
++					sizeof(sindex), &index_start,
++					&index_offset);
++			SQUASHFS_SWAP_DIR_INDEX_2(index, &sindex);
++		} else
++			squashfs_get_cached_block(s, (char *) index,
++					index_start, index_offset,
++					sizeof(struct squashfs_dir_index_2),
++					&index_start, &index_offset);
++
++		squashfs_get_cached_block(s, index->name, index_start,
++					index_offset, index->size + 1,
++					&index_start, &index_offset);
++
++		index->name[index->size + 1] = '\0';
++
++		if (strcmp(index->name, str) > 0)
++			break;
++
++		length = index->index;
++		*next_block = index->start_block + sblk->directory_table_start;
++	}
++
++	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
++	return length;
++}
++
++
++static int squashfs_readdir_2(struct file *file, void *dirent, filldir_t filldir)
++{
++	struct inode *i = file->f_dentry->d_inode;
++	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	long long next_block = SQUASHFS_I(i)->start_block +
++		sblk->directory_table_start;
++	int next_offset = SQUASHFS_I(i)->offset, length = 0, dirs_read = 0,
++		dir_count;
++	struct squashfs_dir_header_2 dirh;
++	char buffer[sizeof(struct squashfs_dir_entry_2) + SQUASHFS_NAME_LEN + 1];
++	struct squashfs_dir_entry_2 *dire = (struct squashfs_dir_entry_2 *) buffer;
++
++	TRACE("Entered squashfs_readdir_2 [%llx:%x]\n", next_block, next_offset);
++
++	length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_start,
++				SQUASHFS_I(i)->u.s2.directory_index_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_count,
++				file->f_pos);
++
++	while (length < i_size_read(i)) {
++		/* read directory header */
++		if (msblk->swap) {
++			struct squashfs_dir_header_2 sdirh;
++
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
++					next_block, next_offset, sizeof(sdirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(sdirh);
++			SQUASHFS_SWAP_DIR_HEADER_2(&dirh, &sdirh);
++		} else {
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
++					next_block, next_offset, sizeof(dirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(dirh);
++		}
++
++		dir_count = dirh.count + 1;
++		while (dir_count--) {
++			if (msblk->swap) {
++				struct squashfs_dir_entry_2 sdire;
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						&sdire, next_block, next_offset,
++						sizeof(sdire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(sdire);
++				SQUASHFS_SWAP_DIR_ENTRY_2(dire, &sdire);
++			} else {
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						dire, next_block, next_offset,
++						sizeof(*dire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(*dire);
++			}
++
++			if (!squashfs_get_cached_block(i->i_sb, dire->name,
++						next_block, next_offset,
++						dire->size + 1, &next_block,
++						&next_offset))
++				goto failed_read;
++
++			length += dire->size + 1;
++
++			if (file->f_pos >= length)
++				continue;
++
++			dire->name[dire->size + 1] = '\0';
++
++			TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d)\n",
++					(unsigned int) dirent, dire->name,
++					dire->size + 1, (int) file->f_pos,
++					dirh.start_block, dire->offset,
++					squashfs_filetype_table[dire->type]);
++
++			if (filldir(dirent, dire->name, dire->size + 1,
++					file->f_pos, SQUASHFS_MK_VFS_INODE(
++					dirh.start_block, dire->offset),
++					squashfs_filetype_table[dire->type])
++					< 0) {
++				TRACE("Filldir returned less than 0\n");
++				goto finish;
++			}
++			file->f_pos = length;
++			dirs_read++;
++		}
++	}
++
++finish:
++	return dirs_read;
++
++failed_read:
++	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
++		next_offset);
++	return 0;
++}
++
++
++static struct dentry *squashfs_lookup_2(struct inode *i, struct dentry *dentry,
++				struct nameidata *nd)
++{
++	const unsigned char *name = dentry->d_name.name;
++	int len = dentry->d_name.len;
++	struct inode *inode = NULL;
++	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	long long next_block = SQUASHFS_I(i)->start_block +
++				sblk->directory_table_start;
++	int next_offset = SQUASHFS_I(i)->offset, length = 0,
++				dir_count;
++	struct squashfs_dir_header_2 dirh;
++	char buffer[sizeof(struct squashfs_dir_entry_2) + SQUASHFS_NAME_LEN];
++	struct squashfs_dir_entry_2 *dire = (struct squashfs_dir_entry_2 *) buffer;
++	int sorted = sblk->s_major == 2 && sblk->s_minor >= 1;
++
++	TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset);
++
++	if (len > SQUASHFS_NAME_LEN)
++		goto exit_loop;
++
++	length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_start,
++				SQUASHFS_I(i)->u.s2.directory_index_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_count, name,
++				len);
++
++	while (length < i_size_read(i)) {
++		/* read directory header */
++		if (msblk->swap) {
++			struct squashfs_dir_header_2 sdirh;
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
++					next_block, next_offset, sizeof(sdirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(sdirh);
++			SQUASHFS_SWAP_DIR_HEADER_2(&dirh, &sdirh);
++		} else {
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
++					next_block, next_offset, sizeof(dirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(dirh);
++		}
++
++		dir_count = dirh.count + 1;
++		while (dir_count--) {
++			if (msblk->swap) {
++				struct squashfs_dir_entry_2 sdire;
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						&sdire, next_block,next_offset,
++						sizeof(sdire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(sdire);
++				SQUASHFS_SWAP_DIR_ENTRY_2(dire, &sdire);
++			} else {
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						dire, next_block,next_offset,
++						sizeof(*dire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(*dire);
++			}
++
++			if (!squashfs_get_cached_block(i->i_sb, dire->name,
++					next_block, next_offset, dire->size + 1,
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += dire->size + 1;
++
++			if (sorted && name[0] < dire->name[0])
++				goto exit_loop;
++
++			if ((len == dire->size + 1) && !strncmp(name,
++						dire->name, len)) {
++				squashfs_inode_t ino =
++					SQUASHFS_MKINODE(dirh.start_block,
++					dire->offset);
++
++				TRACE("calling squashfs_iget for directory "
++					"entry %s, inode %x:%x, %lld\n", name,
++					dirh.start_block, dire->offset, ino);
++
++				inode = (msblk->iget)(i->i_sb, ino);
++
++				goto exit_loop;
++			}
++		}
++	}
++
++exit_loop:
++	d_add(dentry, inode);
++	return ERR_PTR(0);
++
++failed_read:
++	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
++		next_offset);
++	goto exit_loop;
++}
++
++
++int squashfs_2_0_supported(struct squashfs_sb_info *msblk)
++{
++	struct squashfs_super_block *sblk = &msblk->sblk;
++
++	msblk->iget = squashfs_iget_2;
++	msblk->read_fragment_index_table = read_fragment_index_table_2;
++
++	sblk->bytes_used = sblk->bytes_used_2;
++	sblk->uid_start = sblk->uid_start_2;
++	sblk->guid_start = sblk->guid_start_2;
++	sblk->inode_table_start = sblk->inode_table_start_2;
++	sblk->directory_table_start = sblk->directory_table_start_2;
++	sblk->fragment_table_start = sblk->fragment_table_start_2;
++
++	return 1;
++}
+diff -urN linux-2.6.21.1.old/fs/squashfs/squashfs.h linux-2.6.21.1.dev/fs/squashfs/squashfs.h
+--- linux-2.6.21.1.old/fs/squashfs/squashfs.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/squashfs/squashfs.h	2007-05-26 19:00:37.125351152 +0200
+@@ -0,0 +1,86 @@
++/*
++ * Squashfs - a compressed read only filesystem for Linux
++ *
++ * Copyright (c) 2002, 2003, 2004, 2005, 2006
++ * Phillip Lougher <phillip@lougher.org.uk>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2,
++ * or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * squashfs.h
++ */
++
++#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
++#undef CONFIG_SQUASHFS_1_0_COMPATIBILITY
++#endif
++
++#ifdef SQUASHFS_TRACE
++#define TRACE(s, args...)	printk(KERN_NOTICE "SQUASHFS: "s, ## args)
++#else
++#define TRACE(s, args...)	{}
++#endif
++
++#define ERROR(s, args...)	printk(KERN_ERR "SQUASHFS error: "s, ## args)
++
++#define SERROR(s, args...)	do { \
++				if (!silent) \
++				printk(KERN_ERR "SQUASHFS error: "s, ## args);\
++				} while(0)
++
++#define WARNING(s, args...)	printk(KERN_WARNING "SQUASHFS: "s, ## args)
++
++static inline struct squashfs_inode_info *SQUASHFS_I(struct inode *inode)
++{
++	return list_entry(inode, struct squashfs_inode_info, vfs_inode);
++}
++
++#if defined(CONFIG_SQUASHFS_1_0_COMPATIBILITY ) || defined(CONFIG_SQUASHFS_2_0_COMPATIBILITY)
++#define SQSH_EXTERN
++extern unsigned int squashfs_read_data(struct super_block *s, char *buffer,
++				long long index, unsigned int length,
++				long long *next_index);
++extern int squashfs_get_cached_block(struct super_block *s, char *buffer,
++				long long block, unsigned int offset,
++				int length, long long *next_block,
++				unsigned int *next_offset);
++extern void release_cached_fragment(struct squashfs_sb_info *msblk, struct
++					squashfs_fragment_cache *fragment);
++extern struct squashfs_fragment_cache *get_cached_fragment(struct super_block
++					*s, long long start_block,
++					int length);
++extern struct address_space_operations squashfs_symlink_aops;
++extern struct address_space_operations squashfs_aops;
++extern struct address_space_operations squashfs_aops_4K;
++extern struct inode_operations squashfs_dir_inode_ops;
++#else
++#define SQSH_EXTERN static
++#endif
++
++#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
++extern int squashfs_1_0_supported(struct squashfs_sb_info *msblk);
++#else
++static inline int squashfs_1_0_supported(struct squashfs_sb_info *msblk)
++{
++	return 0;
++}
++#endif
++
++#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
++extern int squashfs_2_0_supported(struct squashfs_sb_info *msblk);
++#else
++static inline int squashfs_2_0_supported(struct squashfs_sb_info *msblk)
++{
++	return 0;
++}
++#endif
+diff -urN linux-2.6.21.1.old/include/linux/squashfs_fs.h linux-2.6.21.1.dev/include/linux/squashfs_fs.h
+--- linux-2.6.21.1.old/include/linux/squashfs_fs.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/squashfs_fs.h	2007-05-26 19:00:37.143348416 +0200
+@@ -0,0 +1,911 @@
++#ifndef SQUASHFS_FS
++#define SQUASHFS_FS
++
++/*
++ * Squashfs
++ *
++ * Copyright (c) 2002, 2003, 2004, 2005, 2006
++ * Phillip Lougher <phillip@lougher.org.uk>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2,
++ * or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * squashfs_fs.h
++ */
++
++#ifndef CONFIG_SQUASHFS_2_0_COMPATIBILITY
++#define CONFIG_SQUASHFS_2_0_COMPATIBILITY
++#endif
++
++#ifdef	CONFIG_SQUASHFS_VMALLOC
++#define SQUASHFS_ALLOC(a)		vmalloc(a)
++#define SQUASHFS_FREE(a)		vfree(a)
++#else
++#define SQUASHFS_ALLOC(a)		kmalloc(a, GFP_KERNEL)
++#define SQUASHFS_FREE(a)		kfree(a)
++#endif
++#define SQUASHFS_CACHED_FRAGMENTS	CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE
++#define SQUASHFS_MAJOR			3
++#define SQUASHFS_MINOR			0
++#define SQUASHFS_MAGIC			0x73717368
++#define SQUASHFS_MAGIC_SWAP		0x68737173
++#define SQUASHFS_START			0
++
++/* size of metadata (inode and directory) blocks */
++#define SQUASHFS_METADATA_SIZE		8192
++#define SQUASHFS_METADATA_LOG		13
++
++/* default size of data blocks */
++#define SQUASHFS_FILE_SIZE		65536
++#define SQUASHFS_FILE_LOG		16
++
++#define SQUASHFS_FILE_MAX_SIZE		65536
++
++/* Max number of uids and gids */
++#define SQUASHFS_UIDS			256
++#define SQUASHFS_GUIDS			255
++
++/* Max length of filename (not 255) */
++#define SQUASHFS_NAME_LEN		256
++
++#define SQUASHFS_INVALID		((long long) 0xffffffffffff)
++#define SQUASHFS_INVALID_FRAG		((unsigned int) 0xffffffff)
++#define SQUASHFS_INVALID_BLK		((long long) -1)
++#define SQUASHFS_USED_BLK		((long long) -2)
++
++/* Filesystem flags */
++#define SQUASHFS_NOI			0
++#define SQUASHFS_NOD			1
++#define SQUASHFS_CHECK			2
++#define SQUASHFS_NOF			3
++#define SQUASHFS_NO_FRAG		4
++#define SQUASHFS_ALWAYS_FRAG		5
++#define SQUASHFS_DUPLICATE		6
++
++#define SQUASHFS_BIT(flag, bit)		((flag >> bit) & 1)
++
++#define SQUASHFS_UNCOMPRESSED_INODES(flags)	SQUASHFS_BIT(flags, \
++						SQUASHFS_NOI)
++
++#define SQUASHFS_UNCOMPRESSED_DATA(flags)	SQUASHFS_BIT(flags, \
++						SQUASHFS_NOD)
++
++#define SQUASHFS_UNCOMPRESSED_FRAGMENTS(flags)	SQUASHFS_BIT(flags, \
++						SQUASHFS_NOF)
++
++#define SQUASHFS_NO_FRAGMENTS(flags)		SQUASHFS_BIT(flags, \
++						SQUASHFS_NO_FRAG)
++
++#define SQUASHFS_ALWAYS_FRAGMENTS(flags)	SQUASHFS_BIT(flags, \
++						SQUASHFS_ALWAYS_FRAG)
++
++#define SQUASHFS_DUPLICATES(flags)		SQUASHFS_BIT(flags, \
++						SQUASHFS_DUPLICATE)
++
++#define SQUASHFS_CHECK_DATA(flags)		SQUASHFS_BIT(flags, \
++						SQUASHFS_CHECK)
++
++#define SQUASHFS_MKFLAGS(noi, nod, check_data, nof, no_frag, always_frag, \
++		duplicate_checking)	(noi | (nod << 1) | (check_data << 2) \
++		| (nof << 3) | (no_frag << 4) | (always_frag << 5) | \
++		(duplicate_checking << 6))
++
++/* Max number of types and file types */
++#define SQUASHFS_DIR_TYPE		1
++#define SQUASHFS_FILE_TYPE		2
++#define SQUASHFS_SYMLINK_TYPE		3
++#define SQUASHFS_BLKDEV_TYPE		4
++#define SQUASHFS_CHRDEV_TYPE		5
++#define SQUASHFS_FIFO_TYPE		6
++#define SQUASHFS_SOCKET_TYPE		7
++#define SQUASHFS_LDIR_TYPE		8
++#define SQUASHFS_LREG_TYPE		9
++
++/* 1.0 filesystem type definitions */
++#define SQUASHFS_TYPES			5
++#define SQUASHFS_IPC_TYPE		0
++
++/* Flag whether block is compressed or uncompressed, bit is set if block is
++ * uncompressed */
++#define SQUASHFS_COMPRESSED_BIT		(1 << 15)
++
++#define SQUASHFS_COMPRESSED_SIZE(B)	(((B) & ~SQUASHFS_COMPRESSED_BIT) ? \
++		(B) & ~SQUASHFS_COMPRESSED_BIT :  SQUASHFS_COMPRESSED_BIT)
++
++#define SQUASHFS_COMPRESSED(B)		(!((B) & SQUASHFS_COMPRESSED_BIT))
++
++#define SQUASHFS_COMPRESSED_BIT_BLOCK		(1 << 24)
++
++#define SQUASHFS_COMPRESSED_SIZE_BLOCK(B)	(((B) & \
++	~SQUASHFS_COMPRESSED_BIT_BLOCK) ? (B) & \
++	~SQUASHFS_COMPRESSED_BIT_BLOCK : SQUASHFS_COMPRESSED_BIT_BLOCK)
++
++#define SQUASHFS_COMPRESSED_BLOCK(B)	(!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
++
++/*
++ * Inode number ops.  Inodes consist of a compressed block number, and an
++ * uncompressed  offset within that block
++ */
++#define SQUASHFS_INODE_BLK(a)		((unsigned int) ((a) >> 16))
++
++#define SQUASHFS_INODE_OFFSET(a)	((unsigned int) ((a) & 0xffff))
++
++#define SQUASHFS_MKINODE(A, B)		((squashfs_inode_t)(((squashfs_inode_t) (A)\
++					<< 16) + (B)))
++
++/* Compute 32 bit VFS inode number from squashfs inode number */
++#define SQUASHFS_MK_VFS_INODE(a, b)	((unsigned int) (((a) << 8) + \
++					((b) >> 2) + 1))
++/* XXX */
++
++/* Translate between VFS mode and squashfs mode */
++#define SQUASHFS_MODE(a)		((a) & 0xfff)
++
++/* fragment and fragment table defines */
++#define SQUASHFS_FRAGMENT_BYTES(A)	(A * sizeof(struct squashfs_fragment_entry))
++
++#define SQUASHFS_FRAGMENT_INDEX(A)	(SQUASHFS_FRAGMENT_BYTES(A) / \
++					SQUASHFS_METADATA_SIZE)
++
++#define SQUASHFS_FRAGMENT_INDEX_OFFSET(A)	(SQUASHFS_FRAGMENT_BYTES(A) % \
++						SQUASHFS_METADATA_SIZE)
++
++#define SQUASHFS_FRAGMENT_INDEXES(A)	((SQUASHFS_FRAGMENT_BYTES(A) + \
++					SQUASHFS_METADATA_SIZE - 1) / \
++					SQUASHFS_METADATA_SIZE)
++
++#define SQUASHFS_FRAGMENT_INDEX_BYTES(A)	(SQUASHFS_FRAGMENT_INDEXES(A) *\
++						sizeof(long long))
++
++/* cached data constants for filesystem */
++#define SQUASHFS_CACHED_BLKS		8
++
++#define SQUASHFS_MAX_FILE_SIZE_LOG	64
++
++#define SQUASHFS_MAX_FILE_SIZE		((long long) 1 << \
++					(SQUASHFS_MAX_FILE_SIZE_LOG - 2))
++
++#define SQUASHFS_MARKER_BYTE		0xff
++
++/* meta index cache */
++#define SQUASHFS_META_INDEXES	(SQUASHFS_METADATA_SIZE / sizeof(unsigned int))
++#define SQUASHFS_META_ENTRIES	31
++#define SQUASHFS_META_NUMBER	8
++#define SQUASHFS_SLOTS		4
++
++struct meta_entry {
++	long long		data_block;
++	unsigned int		index_block;
++	unsigned short		offset;
++	unsigned short		pad;
++};
++
++struct meta_index {
++	unsigned int		inode_number;
++	unsigned int		offset;
++	unsigned short		entries;
++	unsigned short		skip;
++	unsigned short		locked;
++	unsigned short		pad;
++	struct meta_entry	meta_entry[SQUASHFS_META_ENTRIES];
++};
++
++
++/*
++ * definitions for structures on disk
++ */
++
++typedef long long		squashfs_block_t;
++typedef long long		squashfs_inode_t;
++
++struct squashfs_super_block {
++	unsigned int		s_magic;
++	unsigned int		inodes;
++	unsigned int		bytes_used_2;
++	unsigned int		uid_start_2;
++	unsigned int		guid_start_2;
++	unsigned int		inode_table_start_2;
++	unsigned int		directory_table_start_2;
++	unsigned int		s_major:16;
++	unsigned int		s_minor:16;
++	unsigned int		block_size_1:16;
++	unsigned int		block_log:16;
++	unsigned int		flags:8;
++	unsigned int		no_uids:8;
++	unsigned int		no_guids:8;
++	unsigned int		mkfs_time /* time of filesystem creation */;
++	squashfs_inode_t	root_inode;
++	unsigned int		block_size;
++	unsigned int		fragments;
++	unsigned int		fragment_table_start_2;
++	long long		bytes_used;
++	long long		uid_start;
++	long long		guid_start;
++	long long		inode_table_start;
++	long long		directory_table_start;
++	long long		fragment_table_start;
++	long long		unused;
++} __attribute__ ((packed));
++
++struct squashfs_dir_index {
++	unsigned int		index;
++	unsigned int		start_block;
++	unsigned char		size;
++	unsigned char		name[0];
++} __attribute__ ((packed));
++
++#define SQUASHFS_BASE_INODE_HEADER		\
++	unsigned int		inode_type:4;	\
++	unsigned int		mode:12;	\
++	unsigned int		uid:8;		\
++	unsigned int		guid:8;		\
++	unsigned int		mtime;		\
++	unsigned int 		inode_number;
++
++struct squashfs_base_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++} __attribute__ ((packed));
++
++struct squashfs_ipc_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++	unsigned int		nlink;
++} __attribute__ ((packed));
++
++struct squashfs_dev_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++	unsigned int		nlink;
++	unsigned short		rdev;
++} __attribute__ ((packed));
++
++struct squashfs_symlink_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++	unsigned int		nlink;
++	unsigned short		symlink_size;
++	char			symlink[0];
++} __attribute__ ((packed));
++
++struct squashfs_reg_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++	squashfs_block_t	start_block;
++	unsigned int		fragment;
++	unsigned int		offset;
++	unsigned int		file_size;
++	unsigned short		block_list[0];
++} __attribute__ ((packed));
++
++struct squashfs_lreg_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++	unsigned int		nlink;
++	squashfs_block_t	start_block;
++	unsigned int		fragment;
++	unsigned int		offset;
++	long long		file_size;
++	unsigned short		block_list[0];
++} __attribute__ ((packed));
++
++struct squashfs_dir_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++	unsigned int		nlink;
++	unsigned int		file_size:19;
++	unsigned int		offset:13;
++	unsigned int		start_block;
++	unsigned int		parent_inode;
++} __attribute__  ((packed));
++
++struct squashfs_ldir_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++	unsigned int		nlink;
++	unsigned int		file_size:27;
++	unsigned int		offset:13;
++	unsigned int		start_block;
++	unsigned int		i_count:16;
++	unsigned int		parent_inode;
++	struct squashfs_dir_index	index[0];
++} __attribute__  ((packed));
++
++union squashfs_inode_header {
++	struct squashfs_base_inode_header	base;
++	struct squashfs_dev_inode_header	dev;
++	struct squashfs_symlink_inode_header	symlink;
++	struct squashfs_reg_inode_header	reg;
++	struct squashfs_lreg_inode_header	lreg;
++	struct squashfs_dir_inode_header	dir;
++	struct squashfs_ldir_inode_header	ldir;
++	struct squashfs_ipc_inode_header	ipc;
++};
++
++struct squashfs_dir_entry {
++	unsigned int		offset:13;
++	unsigned int		type:3;
++	unsigned int		size:8;
++	int			inode_number:16;
++	char			name[0];
++} __attribute__ ((packed));
++
++struct squashfs_dir_header {
++	unsigned int		count:8;
++	unsigned int		start_block;
++	unsigned int		inode_number;
++} __attribute__ ((packed));
++
++struct squashfs_fragment_entry {
++	long long		start_block;
++	unsigned int		size;
++	unsigned int		unused;
++} __attribute__ ((packed));
++
++extern int squashfs_uncompress_block(void *d, int dstlen, void *s, int srclen);
++extern int squashfs_uncompress_init(void);
++extern int squashfs_uncompress_exit(void);
++
++/*
++ * macros to convert each packed bitfield structure from little endian to big
++ * endian and vice versa.  These are needed when creating or using a filesystem
++ * on a machine with different byte ordering to the target architecture.
++ *
++ */
++
++#define SQUASHFS_SWAP_START \
++	int bits;\
++	int b_pos;\
++	unsigned long long val;\
++	unsigned char *s;\
++	unsigned char *d;
++
++#define SQUASHFS_SWAP_SUPER_BLOCK(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_super_block));\
++	SQUASHFS_SWAP((s)->s_magic, d, 0, 32);\
++	SQUASHFS_SWAP((s)->inodes, d, 32, 32);\
++	SQUASHFS_SWAP((s)->bytes_used_2, d, 64, 32);\
++	SQUASHFS_SWAP((s)->uid_start_2, d, 96, 32);\
++	SQUASHFS_SWAP((s)->guid_start_2, d, 128, 32);\
++	SQUASHFS_SWAP((s)->inode_table_start_2, d, 160, 32);\
++	SQUASHFS_SWAP((s)->directory_table_start_2, d, 192, 32);\
++	SQUASHFS_SWAP((s)->s_major, d, 224, 16);\
++	SQUASHFS_SWAP((s)->s_minor, d, 240, 16);\
++	SQUASHFS_SWAP((s)->block_size_1, d, 256, 16);\
++	SQUASHFS_SWAP((s)->block_log, d, 272, 16);\
++	SQUASHFS_SWAP((s)->flags, d, 288, 8);\
++	SQUASHFS_SWAP((s)->no_uids, d, 296, 8);\
++	SQUASHFS_SWAP((s)->no_guids, d, 304, 8);\
++	SQUASHFS_SWAP((s)->mkfs_time, d, 312, 32);\
++	SQUASHFS_SWAP((s)->root_inode, d, 344, 64);\
++	SQUASHFS_SWAP((s)->block_size, d, 408, 32);\
++	SQUASHFS_SWAP((s)->fragments, d, 440, 32);\
++	SQUASHFS_SWAP((s)->fragment_table_start_2, d, 472, 32);\
++	SQUASHFS_SWAP((s)->bytes_used, d, 504, 64);\
++	SQUASHFS_SWAP((s)->uid_start, d, 568, 64);\
++	SQUASHFS_SWAP((s)->guid_start, d, 632, 64);\
++	SQUASHFS_SWAP((s)->inode_table_start, d, 696, 64);\
++	SQUASHFS_SWAP((s)->directory_table_start, d, 760, 64);\
++	SQUASHFS_SWAP((s)->fragment_table_start, d, 824, 64);\
++	SQUASHFS_SWAP((s)->unused, d, 888, 64);\
++}
++
++#define SQUASHFS_SWAP_BASE_INODE_CORE(s, d, n)\
++	SQUASHFS_MEMSET(s, d, n);\
++	SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
++	SQUASHFS_SWAP((s)->mode, d, 4, 12);\
++	SQUASHFS_SWAP((s)->uid, d, 16, 8);\
++	SQUASHFS_SWAP((s)->guid, d, 24, 8);\
++	SQUASHFS_SWAP((s)->mtime, d, 32, 32);\
++	SQUASHFS_SWAP((s)->inode_number, d, 64, 32);
++
++#define SQUASHFS_SWAP_BASE_INODE_HEADER(s, d, n) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, n)\
++}
++
++#define SQUASHFS_SWAP_IPC_INODE_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
++			sizeof(struct squashfs_ipc_inode_header))\
++	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
++}
++
++#define SQUASHFS_SWAP_DEV_INODE_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
++			sizeof(struct squashfs_dev_inode_header)); \
++	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
++	SQUASHFS_SWAP((s)->rdev, d, 128, 16);\
++}
++
++#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
++			sizeof(struct squashfs_symlink_inode_header));\
++	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
++	SQUASHFS_SWAP((s)->symlink_size, d, 128, 16);\
++}
++
++#define SQUASHFS_SWAP_REG_INODE_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
++			sizeof(struct squashfs_reg_inode_header));\
++	SQUASHFS_SWAP((s)->start_block, d, 96, 64);\
++	SQUASHFS_SWAP((s)->fragment, d, 160, 32);\
++	SQUASHFS_SWAP((s)->offset, d, 192, 32);\
++	SQUASHFS_SWAP((s)->file_size, d, 224, 32);\
++}
++
++#define SQUASHFS_SWAP_LREG_INODE_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
++			sizeof(struct squashfs_lreg_inode_header));\
++	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
++	SQUASHFS_SWAP((s)->start_block, d, 128, 64);\
++	SQUASHFS_SWAP((s)->fragment, d, 192, 32);\
++	SQUASHFS_SWAP((s)->offset, d, 224, 32);\
++	SQUASHFS_SWAP((s)->file_size, d, 256, 64);\
++}
++
++#define SQUASHFS_SWAP_DIR_INODE_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
++			sizeof(struct squashfs_dir_inode_header));\
++	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
++	SQUASHFS_SWAP((s)->file_size, d, 128, 19);\
++	SQUASHFS_SWAP((s)->offset, d, 147, 13);\
++	SQUASHFS_SWAP((s)->start_block, d, 160, 32);\
++	SQUASHFS_SWAP((s)->parent_inode, d, 192, 32);\
++}
++
++#define SQUASHFS_SWAP_LDIR_INODE_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
++			sizeof(struct squashfs_ldir_inode_header));\
++	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
++	SQUASHFS_SWAP((s)->file_size, d, 128, 27);\
++	SQUASHFS_SWAP((s)->offset, d, 155, 13);\
++	SQUASHFS_SWAP((s)->start_block, d, 168, 32);\
++	SQUASHFS_SWAP((s)->i_count, d, 200, 16);\
++	SQUASHFS_SWAP((s)->parent_inode, d, 216, 32);\
++}
++
++#define SQUASHFS_SWAP_DIR_INDEX(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_index));\
++	SQUASHFS_SWAP((s)->index, d, 0, 32);\
++	SQUASHFS_SWAP((s)->start_block, d, 32, 32);\
++	SQUASHFS_SWAP((s)->size, d, 64, 8);\
++}
++
++#define SQUASHFS_SWAP_DIR_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_header));\
++	SQUASHFS_SWAP((s)->count, d, 0, 8);\
++	SQUASHFS_SWAP((s)->start_block, d, 8, 32);\
++	SQUASHFS_SWAP((s)->inode_number, d, 40, 32);\
++}
++
++#define SQUASHFS_SWAP_DIR_ENTRY(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_entry));\
++	SQUASHFS_SWAP((s)->offset, d, 0, 13);\
++	SQUASHFS_SWAP((s)->type, d, 13, 3);\
++	SQUASHFS_SWAP((s)->size, d, 16, 8);\
++	SQUASHFS_SWAP((s)->inode_number, d, 24, 16);\
++}
++
++#define SQUASHFS_SWAP_FRAGMENT_ENTRY(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_fragment_entry));\
++	SQUASHFS_SWAP((s)->start_block, d, 0, 64);\
++	SQUASHFS_SWAP((s)->size, d, 64, 32);\
++}
++
++#define SQUASHFS_SWAP_SHORTS(s, d, n) {\
++	int entry;\
++	int bit_position;\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, n * 2);\
++	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
++			16)\
++		SQUASHFS_SWAP(s[entry], d, bit_position, 16);\
++}
++
++#define SQUASHFS_SWAP_INTS(s, d, n) {\
++	int entry;\
++	int bit_position;\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, n * 4);\
++	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
++			32)\
++		SQUASHFS_SWAP(s[entry], d, bit_position, 32);\
++}
++
++#define SQUASHFS_SWAP_LONG_LONGS(s, d, n) {\
++	int entry;\
++	int bit_position;\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, n * 8);\
++	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
++			64)\
++		SQUASHFS_SWAP(s[entry], d, bit_position, 64);\
++}
++
++#define SQUASHFS_SWAP_DATA(s, d, n, bits) {\
++	int entry;\
++	int bit_position;\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, n * bits / 8);\
++	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
++			bits)\
++		SQUASHFS_SWAP(s[entry], d, bit_position, bits);\
++}
++
++#define SQUASHFS_SWAP_FRAGMENT_INDEXES(s, d, n) SQUASHFS_SWAP_LONG_LONGS(s, d, n)
++
++#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
++
++struct squashfs_base_inode_header_1 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:4; /* index into uid table */
++	unsigned int		guid:4; /* index into guid table */
++} __attribute__ ((packed));
++
++struct squashfs_ipc_inode_header_1 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:4; /* index into uid table */
++	unsigned int		guid:4; /* index into guid table */
++	unsigned int		type:4;
++	unsigned int		offset:4;
++} __attribute__ ((packed));
++
++struct squashfs_dev_inode_header_1 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:4; /* index into uid table */
++	unsigned int		guid:4; /* index into guid table */
++	unsigned short		rdev;
++} __attribute__ ((packed));
++
++struct squashfs_symlink_inode_header_1 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:4; /* index into uid table */
++	unsigned int		guid:4; /* index into guid table */
++	unsigned short		symlink_size;
++	char			symlink[0];
++} __attribute__ ((packed));
++
++struct squashfs_reg_inode_header_1 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:4; /* index into uid table */
++	unsigned int		guid:4; /* index into guid table */
++	unsigned int		mtime;
++	unsigned int		start_block;
++	unsigned int		file_size:32;
++	unsigned short		block_list[0];
++} __attribute__ ((packed));
++
++struct squashfs_dir_inode_header_1 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:4; /* index into uid table */
++	unsigned int		guid:4; /* index into guid table */
++	unsigned int		file_size:19;
++	unsigned int		offset:13;
++	unsigned int		mtime;
++	unsigned int		start_block:24;
++} __attribute__  ((packed));
++
++#define SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, n) \
++	SQUASHFS_MEMSET(s, d, n);\
++	SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
++	SQUASHFS_SWAP((s)->mode, d, 4, 12);\
++	SQUASHFS_SWAP((s)->uid, d, 16, 4);\
++	SQUASHFS_SWAP((s)->guid, d, 20, 4);
++
++#define SQUASHFS_SWAP_BASE_INODE_HEADER_1(s, d, n) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, n)\
++}
++
++#define SQUASHFS_SWAP_IPC_INODE_HEADER_1(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
++			sizeof(struct squashfs_ipc_inode_header_1));\
++	SQUASHFS_SWAP((s)->type, d, 24, 4);\
++	SQUASHFS_SWAP((s)->offset, d, 28, 4);\
++}
++
++#define SQUASHFS_SWAP_DEV_INODE_HEADER_1(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
++			sizeof(struct squashfs_dev_inode_header_1));\
++	SQUASHFS_SWAP((s)->rdev, d, 24, 16);\
++}
++
++#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER_1(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
++			sizeof(struct squashfs_symlink_inode_header_1));\
++	SQUASHFS_SWAP((s)->symlink_size, d, 24, 16);\
++}
++
++#define SQUASHFS_SWAP_REG_INODE_HEADER_1(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
++			sizeof(struct squashfs_reg_inode_header_1));\
++	SQUASHFS_SWAP((s)->mtime, d, 24, 32);\
++	SQUASHFS_SWAP((s)->start_block, d, 56, 32);\
++	SQUASHFS_SWAP((s)->file_size, d, 88, 32);\
++}
++
++#define SQUASHFS_SWAP_DIR_INODE_HEADER_1(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
++			sizeof(struct squashfs_dir_inode_header_1));\
++	SQUASHFS_SWAP((s)->file_size, d, 24, 19);\
++	SQUASHFS_SWAP((s)->offset, d, 43, 13);\
++	SQUASHFS_SWAP((s)->mtime, d, 56, 32);\
++	SQUASHFS_SWAP((s)->start_block, d, 88, 24);\
++}
++
++#endif
++
++#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
++
++struct squashfs_dir_index_2 {
++	unsigned int		index:27;
++	unsigned int		start_block:29;
++	unsigned char		size;
++	unsigned char		name[0];
++} __attribute__ ((packed));
++
++struct squashfs_base_inode_header_2 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:8; /* index into uid table */
++	unsigned int		guid:8; /* index into guid table */
++} __attribute__ ((packed));
++
++struct squashfs_ipc_inode_header_2 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:8; /* index into uid table */
++	unsigned int		guid:8; /* index into guid table */
++} __attribute__ ((packed));
++
++struct squashfs_dev_inode_header_2 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:8; /* index into uid table */
++	unsigned int		guid:8; /* index into guid table */
++	unsigned short		rdev;
++} __attribute__ ((packed));
++
++struct squashfs_symlink_inode_header_2 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:8; /* index into uid table */
++	unsigned int		guid:8; /* index into guid table */
++	unsigned short		symlink_size;
++	char			symlink[0];
++} __attribute__ ((packed));
++
++struct squashfs_reg_inode_header_2 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:8; /* index into uid table */
++	unsigned int		guid:8; /* index into guid table */
++	unsigned int		mtime;
++	unsigned int		start_block;
++	unsigned int		fragment;
++	unsigned int		offset;
++	unsigned int		file_size:32;
++	unsigned short		block_list[0];
++} __attribute__ ((packed));
++
++struct squashfs_dir_inode_header_2 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:8; /* index into uid table */
++	unsigned int		guid:8; /* index into guid table */
++	unsigned int		file_size:19;
++	unsigned int		offset:13;
++	unsigned int		mtime;
++	unsigned int		start_block:24;
++} __attribute__  ((packed));
++
++struct squashfs_ldir_inode_header_2 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:8; /* index into uid table */
++	unsigned int		guid:8; /* index into guid table */
++	unsigned int		file_size:27;
++	unsigned int		offset:13;
++	unsigned int		mtime;
++	unsigned int		start_block:24;
++	unsigned int		i_count:16;
++	struct squashfs_dir_index_2	index[0];
++} __attribute__  ((packed));
++
++union squashfs_inode_header_2 {
++	struct squashfs_base_inode_header_2	base;
++	struct squashfs_dev_inode_header_2	dev;
++	struct squashfs_symlink_inode_header_2	symlink;
++	struct squashfs_reg_inode_header_2	reg;
++	struct squashfs_dir_inode_header_2	dir;
++	struct squashfs_ldir_inode_header_2	ldir;
++	struct squashfs_ipc_inode_header_2	ipc;
++};
++
++struct squashfs_dir_header_2 {
++	unsigned int		count:8;
++	unsigned int		start_block:24;
++} __attribute__ ((packed));
++
++struct squashfs_dir_entry_2 {
++	unsigned int		offset:13;
++	unsigned int		type:3;
++	unsigned int		size:8;
++	char			name[0];
++} __attribute__ ((packed));
++
++struct squashfs_fragment_entry_2 {
++	unsigned int		start_block;
++	unsigned int		size;
++} __attribute__ ((packed));
++
++#define SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, n)\
++	SQUASHFS_MEMSET(s, d, n);\
++	SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
++	SQUASHFS_SWAP((s)->mode, d, 4, 12);\
++	SQUASHFS_SWAP((s)->uid, d, 16, 8);\
++	SQUASHFS_SWAP((s)->guid, d, 24, 8);\
++
++#define SQUASHFS_SWAP_BASE_INODE_HEADER_2(s, d, n) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, n)\
++}
++
++#define SQUASHFS_SWAP_IPC_INODE_HEADER_2(s, d) \
++	SQUASHFS_SWAP_BASE_INODE_HEADER_2(s, d, sizeof(struct squashfs_ipc_inode_header_2))
++
++#define SQUASHFS_SWAP_DEV_INODE_HEADER_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
++			sizeof(struct squashfs_dev_inode_header_2)); \
++	SQUASHFS_SWAP((s)->rdev, d, 32, 16);\
++}
++
++#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
++			sizeof(struct squashfs_symlink_inode_header_2));\
++	SQUASHFS_SWAP((s)->symlink_size, d, 32, 16);\
++}
++
++#define SQUASHFS_SWAP_REG_INODE_HEADER_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
++			sizeof(struct squashfs_reg_inode_header_2));\
++	SQUASHFS_SWAP((s)->mtime, d, 32, 32);\
++	SQUASHFS_SWAP((s)->start_block, d, 64, 32);\
++	SQUASHFS_SWAP((s)->fragment, d, 96, 32);\
++	SQUASHFS_SWAP((s)->offset, d, 128, 32);\
++	SQUASHFS_SWAP((s)->file_size, d, 160, 32);\
++}
++
++#define SQUASHFS_SWAP_DIR_INODE_HEADER_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
++			sizeof(struct squashfs_dir_inode_header_2));\
++	SQUASHFS_SWAP((s)->file_size, d, 32, 19);\
++	SQUASHFS_SWAP((s)->offset, d, 51, 13);\
++	SQUASHFS_SWAP((s)->mtime, d, 64, 32);\
++	SQUASHFS_SWAP((s)->start_block, d, 96, 24);\
++}
++
++#define SQUASHFS_SWAP_LDIR_INODE_HEADER_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
++			sizeof(struct squashfs_ldir_inode_header_2));\
++	SQUASHFS_SWAP((s)->file_size, d, 32, 27);\
++	SQUASHFS_SWAP((s)->offset, d, 59, 13);\
++	SQUASHFS_SWAP((s)->mtime, d, 72, 32);\
++	SQUASHFS_SWAP((s)->start_block, d, 104, 24);\
++	SQUASHFS_SWAP((s)->i_count, d, 128, 16);\
++}
++
++#define SQUASHFS_SWAP_DIR_INDEX_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_index_2));\
++	SQUASHFS_SWAP((s)->index, d, 0, 27);\
++	SQUASHFS_SWAP((s)->start_block, d, 27, 29);\
++	SQUASHFS_SWAP((s)->size, d, 56, 8);\
++}
++#define SQUASHFS_SWAP_DIR_HEADER_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_header_2));\
++	SQUASHFS_SWAP((s)->count, d, 0, 8);\
++	SQUASHFS_SWAP((s)->start_block, d, 8, 24);\
++}
++
++#define SQUASHFS_SWAP_DIR_ENTRY_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_entry_2));\
++	SQUASHFS_SWAP((s)->offset, d, 0, 13);\
++	SQUASHFS_SWAP((s)->type, d, 13, 3);\
++	SQUASHFS_SWAP((s)->size, d, 16, 8);\
++}
++
++#define SQUASHFS_SWAP_FRAGMENT_ENTRY_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_fragment_entry_2));\
++	SQUASHFS_SWAP((s)->start_block, d, 0, 32);\
++	SQUASHFS_SWAP((s)->size, d, 32, 32);\
++}
++
++#define SQUASHFS_SWAP_FRAGMENT_INDEXES_2(s, d, n) SQUASHFS_SWAP_INTS(s, d, n)
++
++/* fragment and fragment table defines */
++#define SQUASHFS_FRAGMENT_BYTES_2(A)	(A * sizeof(struct squashfs_fragment_entry_2))
++
++#define SQUASHFS_FRAGMENT_INDEX_2(A)	(SQUASHFS_FRAGMENT_BYTES_2(A) / \
++					SQUASHFS_METADATA_SIZE)
++
++#define SQUASHFS_FRAGMENT_INDEX_OFFSET_2(A)	(SQUASHFS_FRAGMENT_BYTES_2(A) % \
++						SQUASHFS_METADATA_SIZE)
++
++#define SQUASHFS_FRAGMENT_INDEXES_2(A)	((SQUASHFS_FRAGMENT_BYTES_2(A) + \
++					SQUASHFS_METADATA_SIZE - 1) / \
++					SQUASHFS_METADATA_SIZE)
++
++#define SQUASHFS_FRAGMENT_INDEX_BYTES_2(A)	(SQUASHFS_FRAGMENT_INDEXES_2(A) *\
++						sizeof(int))
++
++#endif
++
++#ifdef __KERNEL__
++
++/*
++ * macros used to swap each structure entry, taking into account
++ * bitfields and different bitfield placing conventions on differing
++ * architectures
++ */
++
++#include <asm/byteorder.h>
++
++#ifdef __BIG_ENDIAN
++	/* convert from little endian to big endian */
++#define SQUASHFS_SWAP(value, p, pos, tbits) _SQUASHFS_SWAP(value, p, pos, \
++		tbits, b_pos)
++#else
++	/* convert from big endian to little endian */
++#define SQUASHFS_SWAP(value, p, pos, tbits) _SQUASHFS_SWAP(value, p, pos, \
++		tbits, 64 - tbits - b_pos)
++#endif
++
++#define _SQUASHFS_SWAP(value, p, pos, tbits, SHIFT) {\
++	b_pos = pos % 8;\
++	val = 0;\
++	s = (unsigned char *)p + (pos / 8);\
++	d = ((unsigned char *) &val) + 7;\
++	for(bits = 0; bits < (tbits + b_pos); bits += 8) \
++		*d-- = *s++;\
++	value = (val >> (SHIFT))/* & ((1 << tbits) - 1)*/;\
++}
++
++#define SQUASHFS_MEMSET(s, d, n)	memset(s, 0, n);
++
++#endif
++#endif
+diff -urN linux-2.6.21.1.old/include/linux/squashfs_fs_i.h linux-2.6.21.1.dev/include/linux/squashfs_fs_i.h
+--- linux-2.6.21.1.old/include/linux/squashfs_fs_i.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/squashfs_fs_i.h	2007-05-26 19:00:37.143348416 +0200
+@@ -0,0 +1,45 @@
++#ifndef SQUASHFS_FS_I
++#define SQUASHFS_FS_I
++/*
++ * Squashfs
++ *
++ * Copyright (c) 2002, 2003, 2004, 2005, 2006
++ * Phillip Lougher <phillip@lougher.org.uk>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2,
++ * or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * squashfs_fs_i.h
++ */
++
++struct squashfs_inode_info {
++	long long	start_block;
++	unsigned int	offset;
++	union {
++		struct {
++			long long	fragment_start_block;
++			unsigned int	fragment_size;
++			unsigned int	fragment_offset;
++			long long	block_list_start;
++		} s1;
++		struct {
++			long long	directory_index_start;
++			unsigned int	directory_index_offset;
++			unsigned int	directory_index_count;
++			unsigned int	parent_inode;
++		} s2;
++	} u;
++	struct inode	vfs_inode;
++};
++#endif
+diff -urN linux-2.6.21.1.old/include/linux/squashfs_fs_sb.h linux-2.6.21.1.dev/include/linux/squashfs_fs_sb.h
+--- linux-2.6.21.1.old/include/linux/squashfs_fs_sb.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/squashfs_fs_sb.h	2007-05-26 19:00:37.144348264 +0200
+@@ -0,0 +1,74 @@
++#ifndef SQUASHFS_FS_SB
++#define SQUASHFS_FS_SB
++/*
++ * Squashfs
++ *
++ * Copyright (c) 2002, 2003, 2004, 2005, 2006
++ * Phillip Lougher <phillip@lougher.org.uk>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2,
++ * or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * squashfs_fs_sb.h
++ */
++
++#include <linux/squashfs_fs.h>
++
++struct squashfs_cache {
++	long long	block;
++	int		length;
++	long long	next_index;
++	char		*data;
++};
++
++struct squashfs_fragment_cache {
++	long long	block;
++	int		length;
++	unsigned int	locked;
++	char		*data;
++};
++
++struct squashfs_sb_info {
++	struct squashfs_super_block	sblk;
++	int			devblksize;
++	int			devblksize_log2;
++	int			swap;
++	struct squashfs_cache	*block_cache;
++	struct squashfs_fragment_cache	*fragment;
++	int			next_cache;
++	int			next_fragment;
++	int			next_meta_index;
++	unsigned int		*uid;
++	unsigned int		*guid;
++	long long		*fragment_index;
++	unsigned int		*fragment_index_2;
++	unsigned int		read_size;
++	char			*read_data;
++	char			*read_page;
++	struct semaphore	read_data_mutex;
++	struct semaphore	read_page_mutex;
++	struct semaphore	block_cache_mutex;
++	struct semaphore	fragment_mutex;
++	struct semaphore	meta_index_mutex;
++	wait_queue_head_t	waitq;
++	wait_queue_head_t	fragment_wait_queue;
++	struct meta_index	*meta_index;
++	struct inode		*(*iget)(struct super_block *s,  squashfs_inode_t \
++				inode);
++	long long		(*read_blocklist)(struct inode *inode, int \
++				index, int readahead_blks, char *block_list, \
++				unsigned short **block_p, unsigned int *bsize);
++	int			(*read_fragment_index_table)(struct super_block *s);
++};
++#endif
+diff -urN linux-2.6.21.1.old/init/do_mounts_rd.c linux-2.6.21.1.dev/init/do_mounts_rd.c
+--- linux-2.6.21.1.old/init/do_mounts_rd.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/init/do_mounts_rd.c	2007-05-26 19:00:37.144348264 +0200
+@@ -5,6 +5,7 @@
+ #include <linux/ext2_fs.h>
+ #include <linux/romfs_fs.h>
+ #include <linux/cramfs_fs.h>
++#include <linux/squashfs_fs.h>
+ #include <linux/initrd.h>
+ #include <linux/string.h>
+ 
+@@ -39,6 +40,7 @@
+  * numbers could not be found.
+  *
+  * We currently check for the following magic numbers:
++ *      squashfs
+  * 	minix
+  * 	ext2
+  *	romfs
+@@ -53,6 +55,7 @@
+ 	struct ext2_super_block *ext2sb;
+ 	struct romfs_super_block *romfsb;
+ 	struct cramfs_super *cramfsb;
++	struct squashfs_super_block *squashfsb;
+ 	int nblocks = -1;
+ 	unsigned char *buf;
+ 
+@@ -64,6 +67,7 @@
+ 	ext2sb = (struct ext2_super_block *) buf;
+ 	romfsb = (struct romfs_super_block *) buf;
+ 	cramfsb = (struct cramfs_super *) buf;
++	squashfsb = (struct squashfs_super_block *) buf;
+ 	memset(buf, 0xe5, size);
+ 
+ 	/*
+@@ -101,6 +105,15 @@
+ 		goto done;
+ 	}
+ 
++	/* squashfs is at block zero too */
++	if (squashfsb->s_magic == SQUASHFS_MAGIC) {
++		printk(KERN_NOTICE
++		       "RAMDISK: squashfs filesystem found at block %d\n",
++		       start_block);
++		nblocks = (squashfsb->bytes_used+BLOCK_SIZE-1)>>BLOCK_SIZE_BITS;
++		goto done;
++	}
++
+ 	/*
+ 	 * Read block 1 to test for minix and ext2 superblock
+ 	 */
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/002-lzma_decompress.patch
@@ -1,1 +1,792 @@
+diff -urN linux-2.6.21.1.old/include/linux/LzmaDecode.h linux-2.6.21.1.dev/include/linux/LzmaDecode.h
+--- linux-2.6.21.1.old/include/linux/LzmaDecode.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/LzmaDecode.h	2007-05-26 19:03:45.705682584 +0200
+@@ -0,0 +1,100 @@
++/*
++  LzmaDecode.h
++  LZMA Decoder interface
++
++  LZMA SDK 4.05 Copyright (c) 1999-2004 Igor Pavlov (2004-08-25)
++  http://www.7-zip.org/
++
++  LZMA SDK is licensed under two licenses:
++  1) GNU Lesser General Public License (GNU LGPL)
++  2) Common Public License (CPL)
++  It means that you can select one of these two licenses and
++  follow rules of that license.
++
++  SPECIAL EXCEPTION:
++  Igor Pavlov, as the author of this code, expressly permits you to
++  statically or dynamically link your code (or bind by name) to the
++  interfaces of this file without subjecting your linked code to the
++  terms of the CPL or GNU LGPL. Any modifications or additions
++  to this file, however, are subject to the LGPL or CPL terms.
++*/
++
++#ifndef __LZMADECODE_H
++#define __LZMADECODE_H
++
++/* #define _LZMA_IN_CB */
++/* Use callback for input data */
++
++/* #define _LZMA_OUT_READ */
++/* Use read function for output data */
++
++/* #define _LZMA_PROB32 */
++/* It can increase speed on some 32-bit CPUs,
++   but memory usage will be doubled in that case */
++
++/* #define _LZMA_LOC_OPT */
++/* Enable local speed optimizations inside code */
++
++#ifndef UInt32
++#ifdef _LZMA_UINT32_IS_ULONG
++#define UInt32 unsigned long
++#else
++#define UInt32 unsigned int
++#endif
++#endif
++
++#ifdef _LZMA_PROB32
++#define CProb UInt32
++#else
++#define CProb unsigned short
++#endif
++
++#define LZMA_RESULT_OK 0
++#define LZMA_RESULT_DATA_ERROR 1
++#define LZMA_RESULT_NOT_ENOUGH_MEM 2
++
++#ifdef _LZMA_IN_CB
++typedef struct _ILzmaInCallback
++{
++  int (*Read)(void *object, unsigned char **buffer, UInt32 *bufferSize);
++} ILzmaInCallback;
++#endif
++
++#define LZMA_BASE_SIZE 1846
++#define LZMA_LIT_SIZE 768
++
++/*
++bufferSize = (LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)))* sizeof(CProb)
++bufferSize += 100 in case of _LZMA_OUT_READ
++by default CProb is unsigned short,
++but if specify _LZMA_PROB_32, CProb will be UInt32(unsigned int)
++*/
++
++#ifdef _LZMA_OUT_READ
++int LzmaDecoderInit(
++    unsigned char *buffer, UInt32 bufferSize,
++    int lc, int lp, int pb,
++    unsigned char *dictionary, UInt32 dictionarySize,
++  #ifdef _LZMA_IN_CB
++    ILzmaInCallback *inCallback
++  #else
++    unsigned char *inStream, UInt32 inSize
++  #endif
++);
++#endif
++
++int LzmaDecode(
++    unsigned char *buffer,
++  #ifndef _LZMA_OUT_READ
++    UInt32 bufferSize,
++    int lc, int lp, int pb,
++  #ifdef _LZMA_IN_CB
++    ILzmaInCallback *inCallback,
++  #else
++    unsigned char *inStream, UInt32 inSize,
++  #endif
++  #endif
++    unsigned char *outStream, UInt32 outSize,
++    UInt32 *outSizeProcessed);
++
++#endif
+diff -urN linux-2.6.21.1.old/lib/LzmaDecode.c linux-2.6.21.1.dev/lib/LzmaDecode.c
+--- linux-2.6.21.1.old/lib/LzmaDecode.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/lib/LzmaDecode.c	2007-05-26 19:03:45.706682432 +0200
+@@ -0,0 +1,663 @@
++/*
++  LzmaDecode.c
++  LZMA Decoder
++
++  LZMA SDK 4.05 Copyright (c) 1999-2004 Igor Pavlov (2004-08-25)
++  http://www.7-zip.org/
++
++  LZMA SDK is licensed under two licenses:
++  1) GNU Lesser General Public License (GNU LGPL)
++  2) Common Public License (CPL)
++  It means that you can select one of these two licenses and
++  follow rules of that license.
++
++  SPECIAL EXCEPTION:
++  Igor Pavlov, as the author of this code, expressly permits you to
++  statically or dynamically link your code (or bind by name) to the
++  interfaces of this file without subjecting your linked code to the
++  terms of the CPL or GNU LGPL. Any modifications or additions
++  to this file, however, are subject to the LGPL or CPL terms.
++*/
++
++#include <linux/LzmaDecode.h>
++
++#ifndef Byte
++#define Byte unsigned char
++#endif
++
++#define kNumTopBits 24
++#define kTopValue ((UInt32)1 << kNumTopBits)
++
++#define kNumBitModelTotalBits 11
++#define kBitModelTotal (1 << kNumBitModelTotalBits)
++#define kNumMoveBits 5
++
++typedef struct _CRangeDecoder
++{
++  Byte *Buffer;
++  Byte *BufferLim;
++  UInt32 Range;
++  UInt32 Code;
++  #ifdef _LZMA_IN_CB
++  ILzmaInCallback *InCallback;
++  int Result;
++  #endif
++  int ExtraBytes;
++} CRangeDecoder;
++
++Byte RangeDecoderReadByte(CRangeDecoder *rd)
++{
++  if (rd->Buffer == rd->BufferLim)
++  {
++    #ifdef _LZMA_IN_CB
++    UInt32 size;
++    rd->Result = rd->InCallback->Read(rd->InCallback, &rd->Buffer, &size);
++    rd->BufferLim = rd->Buffer + size;
++    if (size == 0)
++    #endif
++    {
++      rd->ExtraBytes = 1;
++      return 0xFF;
++    }
++  }
++  return (*rd->Buffer++);
++}
++
++/* #define ReadByte (*rd->Buffer++) */
++#define ReadByte (RangeDecoderReadByte(rd))
++
++void RangeDecoderInit(CRangeDecoder *rd,
++  #ifdef _LZMA_IN_CB
++    ILzmaInCallback *inCallback
++  #else
++    Byte *stream, UInt32 bufferSize
++  #endif
++    )
++{
++  int i;
++  #ifdef _LZMA_IN_CB
++  rd->InCallback = inCallback;
++  rd->Buffer = rd->BufferLim = 0;
++  #else
++  rd->Buffer = stream;
++  rd->BufferLim = stream + bufferSize;
++  #endif
++  rd->ExtraBytes = 0;
++  rd->Code = 0;
++  rd->Range = (0xFFFFFFFF);
++  for(i = 0; i < 5; i++)
++    rd->Code = (rd->Code << 8) | ReadByte;
++}
++
++#define RC_INIT_VAR UInt32 range = rd->Range; UInt32 code = rd->Code;
++#define RC_FLUSH_VAR rd->Range = range; rd->Code = code;
++#define RC_NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | ReadByte; }
++
++UInt32 RangeDecoderDecodeDirectBits(CRangeDecoder *rd, int numTotalBits)
++{
++  RC_INIT_VAR
++  UInt32 result = 0;
++  int i;
++  for (i = numTotalBits; i > 0; i--)
++  {
++    /* UInt32 t; */
++    range >>= 1;
++
++    result <<= 1;
++    if (code >= range)
++    {
++      code -= range;
++      result |= 1;
++    }
++    /*
++    t = (code - range) >> 31;
++    t &= 1;
++    code -= range & (t - 1);
++    result = (result + result) | (1 - t);
++    */
++    RC_NORMALIZE
++  }
++  RC_FLUSH_VAR
++  return result;
++}
++
++int RangeDecoderBitDecode(CProb *prob, CRangeDecoder *rd)
++{
++  UInt32 bound = (rd->Range >> kNumBitModelTotalBits) * *prob;
++  if (rd->Code < bound)
++  {
++    rd->Range = bound;
++    *prob += (kBitModelTotal - *prob) >> kNumMoveBits;
++    if (rd->Range < kTopValue)
++    {
++      rd->Code = (rd->Code << 8) | ReadByte;
++      rd->Range <<= 8;
++    }
++    return 0;
++  }
++  else
++  {
++    rd->Range -= bound;
++    rd->Code -= bound;
++    *prob -= (*prob) >> kNumMoveBits;
++    if (rd->Range < kTopValue)
++    {
++      rd->Code = (rd->Code << 8) | ReadByte;
++      rd->Range <<= 8;
++    }
++    return 1;
++  }
++}
++
++#define RC_GET_BIT2(prob, mi, A0, A1) \
++  UInt32 bound = (range >> kNumBitModelTotalBits) * *prob; \
++  if (code < bound) \
++    { A0; range = bound; *prob += (kBitModelTotal - *prob) >> kNumMoveBits; mi <<= 1; } \
++  else \
++    { A1; range -= bound; code -= bound; *prob -= (*prob) >> kNumMoveBits; mi = (mi + mi) + 1; } \
++  RC_NORMALIZE
++
++#define RC_GET_BIT(prob, mi) RC_GET_BIT2(prob, mi, ; , ;)
++
++int RangeDecoderBitTreeDecode(CProb *probs, int numLevels, CRangeDecoder *rd)
++{
++  int mi = 1;
++  int i;
++  #ifdef _LZMA_LOC_OPT
++  RC_INIT_VAR
++  #endif
++  for(i = numLevels; i > 0; i--)
++  {
++    #ifdef _LZMA_LOC_OPT
++    CProb *prob = probs + mi;
++    RC_GET_BIT(prob, mi)
++    #else
++    mi = (mi + mi) + RangeDecoderBitDecode(probs + mi, rd);
++    #endif
++  }
++  #ifdef _LZMA_LOC_OPT
++  RC_FLUSH_VAR
++  #endif
++  return mi - (1 << numLevels);
++}
++
++int RangeDecoderReverseBitTreeDecode(CProb *probs, int numLevels, CRangeDecoder *rd)
++{
++  int mi = 1;
++  int i;
++  int symbol = 0;
++  #ifdef _LZMA_LOC_OPT
++  RC_INIT_VAR
++  #endif
++  for(i = 0; i < numLevels; i++)
++  {
++    #ifdef _LZMA_LOC_OPT
++    CProb *prob = probs + mi;
++    RC_GET_BIT2(prob, mi, ; , symbol |= (1 << i))
++    #else
++    int bit = RangeDecoderBitDecode(probs + mi, rd);
++    mi = mi + mi + bit;
++    symbol |= (bit << i);
++    #endif
++  }
++  #ifdef _LZMA_LOC_OPT
++  RC_FLUSH_VAR
++  #endif
++  return symbol;
++}
++
++Byte LzmaLiteralDecode(CProb *probs, CRangeDecoder *rd)
++{
++  int symbol = 1;
++  #ifdef _LZMA_LOC_OPT
++  RC_INIT_VAR
++  #endif
++  do
++  {
++    #ifdef _LZMA_LOC_OPT
++    CProb *prob = probs + symbol;
++    RC_GET_BIT(prob, symbol)
++    #else
++    symbol = (symbol + symbol) | RangeDecoderBitDecode(probs + symbol, rd);
++    #endif
++  }
++  while (symbol < 0x100);
++  #ifdef _LZMA_LOC_OPT
++  RC_FLUSH_VAR
++  #endif
++  return symbol;
++}
++
++Byte LzmaLiteralDecodeMatch(CProb *probs, CRangeDecoder *rd, Byte matchByte)
++{
++  int symbol = 1;
++  #ifdef _LZMA_LOC_OPT
++  RC_INIT_VAR
++  #endif
++  do
++  {
++    int bit;
++    int matchBit = (matchByte >> 7) & 1;
++    matchByte <<= 1;
++    #ifdef _LZMA_LOC_OPT
++    {
++      CProb *prob = probs + ((1 + matchBit) << 8) + symbol;
++      RC_GET_BIT2(prob, symbol, bit = 0, bit = 1)
++    }
++    #else
++    bit = RangeDecoderBitDecode(probs + ((1 + matchBit) << 8) + symbol, rd);
++    symbol = (symbol << 1) | bit;
++    #endif
++    if (matchBit != bit)
++    {
++      while (symbol < 0x100)
++      {
++        #ifdef _LZMA_LOC_OPT
++        CProb *prob = probs + symbol;
++        RC_GET_BIT(prob, symbol)
++        #else
++        symbol = (symbol + symbol) | RangeDecoderBitDecode(probs + symbol, rd);
++        #endif
++      }
++      break;
++    }
++  }
++  while (symbol < 0x100);
++  #ifdef _LZMA_LOC_OPT
++  RC_FLUSH_VAR
++  #endif
++  return symbol;
++}
++
++#define kNumPosBitsMax 4
++#define kNumPosStatesMax (1 << kNumPosBitsMax)
++
++#define kLenNumLowBits 3
++#define kLenNumLowSymbols (1 << kLenNumLowBits)
++#define kLenNumMidBits 3
++#define kLenNumMidSymbols (1 << kLenNumMidBits)
++#define kLenNumHighBits 8
++#define kLenNumHighSymbols (1 << kLenNumHighBits)
++
++#define LenChoice 0
++#define LenChoice2 (LenChoice + 1)
++#define LenLow (LenChoice2 + 1)
++#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
++#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
++#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
++
++int LzmaLenDecode(CProb *p, CRangeDecoder *rd, int posState)
++{
++  if(RangeDecoderBitDecode(p + LenChoice, rd) == 0)
++    return RangeDecoderBitTreeDecode(p + LenLow +
++        (posState << kLenNumLowBits), kLenNumLowBits, rd);
++  if(RangeDecoderBitDecode(p + LenChoice2, rd) == 0)
++    return kLenNumLowSymbols + RangeDecoderBitTreeDecode(p + LenMid +
++        (posState << kLenNumMidBits), kLenNumMidBits, rd);
++  return kLenNumLowSymbols + kLenNumMidSymbols +
++      RangeDecoderBitTreeDecode(p + LenHigh, kLenNumHighBits, rd);
++}
++
++#define kNumStates 12
++
++#define kStartPosModelIndex 4
++#define kEndPosModelIndex 14
++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
++
++#define kNumPosSlotBits 6
++#define kNumLenToPosStates 4
++
++#define kNumAlignBits 4
++#define kAlignTableSize (1 << kNumAlignBits)
++
++#define kMatchMinLen 2
++
++#define IsMatch 0
++#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
++#define IsRepG0 (IsRep + kNumStates)
++#define IsRepG1 (IsRepG0 + kNumStates)
++#define IsRepG2 (IsRepG1 + kNumStates)
++#define IsRep0Long (IsRepG2 + kNumStates)
++#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
++#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
++#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
++#define LenCoder (Align + kAlignTableSize)
++#define RepLenCoder (LenCoder + kNumLenProbs)
++#define Literal (RepLenCoder + kNumLenProbs)
++
++#if Literal != LZMA_BASE_SIZE
++StopCompilingDueBUG
++#endif
++
++#ifdef _LZMA_OUT_READ
++
++typedef struct _LzmaVarState
++{
++  CRangeDecoder RangeDecoder;
++  Byte *Dictionary;
++  UInt32 DictionarySize;
++  UInt32 DictionaryPos;
++  UInt32 GlobalPos;
++  UInt32 Reps[4];
++  int lc;
++  int lp;
++  int pb;
++  int State;
++  int PreviousIsMatch;
++  int RemainLen;
++} LzmaVarState;
++
++int LzmaDecoderInit(
++    unsigned char *buffer, UInt32 bufferSize,
++    int lc, int lp, int pb,
++    unsigned char *dictionary, UInt32 dictionarySize,
++    #ifdef _LZMA_IN_CB
++    ILzmaInCallback *inCallback
++    #else
++    unsigned char *inStream, UInt32 inSize
++    #endif
++    )
++{
++  LzmaVarState *vs = (LzmaVarState *)buffer;
++  CProb *p = (CProb *)(buffer + sizeof(LzmaVarState));
++  UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + lp));
++  UInt32 i;
++  if (bufferSize < numProbs * sizeof(CProb) + sizeof(LzmaVarState))
++    return LZMA_RESULT_NOT_ENOUGH_MEM;
++  vs->Dictionary = dictionary;
++  vs->DictionarySize = dictionarySize;
++  vs->DictionaryPos = 0;
++  vs->GlobalPos = 0;
++  vs->Reps[0] = vs->Reps[1] = vs->Reps[2] = vs->Reps[3] = 1;
++  vs->lc = lc;
++  vs->lp = lp;
++  vs->pb = pb;
++  vs->State = 0;
++  vs->PreviousIsMatch = 0;
++  vs->RemainLen = 0;
++  dictionary[dictionarySize - 1] = 0;
++  for (i = 0; i < numProbs; i++)
++    p[i] = kBitModelTotal >> 1;
++  RangeDecoderInit(&vs->RangeDecoder,
++      #ifdef _LZMA_IN_CB
++      inCallback
++      #else
++      inStream, inSize
++      #endif
++  );
++  return LZMA_RESULT_OK;
++}
++
++int LzmaDecode(unsigned char *buffer,
++    unsigned char *outStream, UInt32 outSize,
++    UInt32 *outSizeProcessed)
++{
++  LzmaVarState *vs = (LzmaVarState *)buffer;
++  CProb *p = (CProb *)(buffer + sizeof(LzmaVarState));
++  CRangeDecoder rd = vs->RangeDecoder;
++  int state = vs->State;
++  int previousIsMatch = vs->PreviousIsMatch;
++  Byte previousByte;
++  UInt32 rep0 = vs->Reps[0], rep1 = vs->Reps[1], rep2 = vs->Reps[2], rep3 = vs->Reps[3];
++  UInt32 nowPos = 0;
++  UInt32 posStateMask = (1 << (vs->pb)) - 1;
++  UInt32 literalPosMask = (1 << (vs->lp)) - 1;
++  int lc = vs->lc;
++  int len = vs->RemainLen;
++  UInt32 globalPos = vs->GlobalPos;
++
++  Byte *dictionary = vs->Dictionary;
++  UInt32 dictionarySize = vs->DictionarySize;
++  UInt32 dictionaryPos = vs->DictionaryPos;
++
++  if (len == -1)
++  {
++    *outSizeProcessed = 0;
++    return LZMA_RESULT_OK;
++  }
++
++  while(len > 0 && nowPos < outSize)
++  {
++    UInt32 pos = dictionaryPos - rep0;
++    if (pos >= dictionarySize)
++      pos += dictionarySize;
++    outStream[nowPos++] = dictionary[dictionaryPos] = dictionary[pos];
++    if (++dictionaryPos == dictionarySize)
++      dictionaryPos = 0;
++    len--;
++  }
++  if (dictionaryPos == 0)
++    previousByte = dictionary[dictionarySize - 1];
++  else
++    previousByte = dictionary[dictionaryPos - 1];
++#else
++
++int LzmaDecode(
++    Byte *buffer, UInt32 bufferSize,
++    int lc, int lp, int pb,
++    #ifdef _LZMA_IN_CB
++    ILzmaInCallback *inCallback,
++    #else
++    unsigned char *inStream, UInt32 inSize,
++    #endif
++    unsigned char *outStream, UInt32 outSize,
++    UInt32 *outSizeProcessed)
++{
++  UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + lp));
++  CProb *p = (CProb *)buffer;
++  CRangeDecoder rd;
++  UInt32 i;
++  int state = 0;
++  int previousIsMatch = 0;
++  Byte previousByte = 0;
++  UInt32 rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1;
++  UInt32 nowPos = 0;
++  UInt32 posStateMask = (1 << pb) - 1;
++  UInt32 literalPosMask = (1 << lp) - 1;
++  int len = 0;
++  if (bufferSize < numProbs * sizeof(CProb))
++    return LZMA_RESULT_NOT_ENOUGH_MEM;
++  for (i = 0; i < numProbs; i++)
++    p[i] = kBitModelTotal >> 1;
++  RangeDecoderInit(&rd,
++      #ifdef _LZMA_IN_CB
++      inCallback
++      #else
++      inStream, inSize
++      #endif
++      );
++#endif
++
++  *outSizeProcessed = 0;
++  while(nowPos < outSize)
++  {
++    int posState = (int)(
++        (nowPos
++        #ifdef _LZMA_OUT_READ
++        + globalPos
++        #endif
++        )
++        & posStateMask);
++    #ifdef _LZMA_IN_CB
++    if (rd.Result != LZMA_RESULT_OK)
++      return rd.Result;
++    #endif
++    if (rd.ExtraBytes != 0)
++      return LZMA_RESULT_DATA_ERROR;
++    if (RangeDecoderBitDecode(p + IsMatch + (state << kNumPosBitsMax) + posState, &rd) == 0)
++    {
++      CProb *probs = p + Literal + (LZMA_LIT_SIZE *
++        (((
++        (nowPos
++        #ifdef _LZMA_OUT_READ
++        + globalPos
++        #endif
++        )
++        & literalPosMask) << lc) + (previousByte >> (8 - lc))));
++
++      if (state < 4) state = 0;
++      else if (state < 10) state -= 3;
++      else state -= 6;
++      if (previousIsMatch)
++      {
++        Byte matchByte;
++        #ifdef _LZMA_OUT_READ
++        UInt32 pos = dictionaryPos - rep0;
++        if (pos >= dictionarySize)
++          pos += dictionarySize;
++        matchByte = dictionary[pos];
++        #else
++        matchByte = outStream[nowPos - rep0];
++        #endif
++        previousByte = LzmaLiteralDecodeMatch(probs, &rd, matchByte);
++        previousIsMatch = 0;
++      }
++      else
++        previousByte = LzmaLiteralDecode(probs, &rd);
++      outStream[nowPos++] = previousByte;
++      #ifdef _LZMA_OUT_READ
++      dictionary[dictionaryPos] = previousByte;
++      if (++dictionaryPos == dictionarySize)
++        dictionaryPos = 0;
++      #endif
++    }
++    else
++    {
++      previousIsMatch = 1;
++      if (RangeDecoderBitDecode(p + IsRep + state, &rd) == 1)
++      {
++        if (RangeDecoderBitDecode(p + IsRepG0 + state, &rd) == 0)
++        {
++          if (RangeDecoderBitDecode(p + IsRep0Long + (state << kNumPosBitsMax) + posState, &rd) == 0)
++          {
++            #ifdef _LZMA_OUT_READ
++            UInt32 pos;
++            #endif
++            if (
++               (nowPos
++                #ifdef _LZMA_OUT_READ
++                + globalPos
++                #endif
++               )
++               == 0)
++              return LZMA_RESULT_DATA_ERROR;
++            state = state < 7 ? 9 : 11;
++            #ifdef _LZMA_OUT_READ
++            pos = dictionaryPos - rep0;
++            if (pos >= dictionarySize)
++              pos += dictionarySize;
++            previousByte = dictionary[pos];
++            dictionary[dictionaryPos] = previousByte;
++            if (++dictionaryPos == dictionarySize)
++              dictionaryPos = 0;
++            #else
++            previousByte = outStream[nowPos - rep0];
++            #endif
++            outStream[nowPos++] = previousByte;
++            continue;
++          }
++        }
++        else
++        {
++          UInt32 distance;
++          if(RangeDecoderBitDecode(p + IsRepG1 + state, &rd) == 0)
++            distance = rep1;
++          else
++          {
++            if(RangeDecoderBitDecode(p + IsRepG2 + state, &rd) == 0)
++              distance = rep2;
++            else
++            {
++              distance = rep3;
++              rep3 = rep2;
++            }
++            rep2 = rep1;
++          }
++          rep1 = rep0;
++          rep0 = distance;
++        }
++        len = LzmaLenDecode(p + RepLenCoder, &rd, posState);
++        state = state < 7 ? 8 : 11;
++      }
++      else
++      {
++        int posSlot;
++        rep3 = rep2;
++        rep2 = rep1;
++        rep1 = rep0;
++        state = state < 7 ? 7 : 10;
++        len = LzmaLenDecode(p + LenCoder, &rd, posState);
++        posSlot = RangeDecoderBitTreeDecode(p + PosSlot +
++            ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<
++            kNumPosSlotBits), kNumPosSlotBits, &rd);
++        if (posSlot >= kStartPosModelIndex)
++        {
++          int numDirectBits = ((posSlot >> 1) - 1);
++          rep0 = ((2 | ((UInt32)posSlot & 1)) << numDirectBits);
++          if (posSlot < kEndPosModelIndex)
++          {
++            rep0 += RangeDecoderReverseBitTreeDecode(
++                p + SpecPos + rep0 - posSlot - 1, numDirectBits, &rd);
++          }
++          else
++          {
++            rep0 += RangeDecoderDecodeDirectBits(&rd,
++                numDirectBits - kNumAlignBits) << kNumAlignBits;
++            rep0 += RangeDecoderReverseBitTreeDecode(p + Align, kNumAlignBits, &rd);
++          }
++        }
++        else
++          rep0 = posSlot;
++        rep0++;
++      }
++      if (rep0 == (UInt32)(0))
++      {
++        /* it's for stream version */
++        len = -1;
++        break;
++      }
++      if (rep0 > nowPos
++        #ifdef _LZMA_OUT_READ
++        + globalPos
++        #endif
++        )
++      {
++        return LZMA_RESULT_DATA_ERROR;
++      }
++      len += kMatchMinLen;
++      do
++      {
++        #ifdef _LZMA_OUT_READ
++        UInt32 pos = dictionaryPos - rep0;
++        if (pos >= dictionarySize)
++          pos += dictionarySize;
++        previousByte = dictionary[pos];
++        dictionary[dictionaryPos] = previousByte;
++        if (++dictionaryPos == dictionarySize)
++          dictionaryPos = 0;
++        #else
++        previousByte = outStream[nowPos - rep0];
++        #endif
++        outStream[nowPos++] = previousByte;
++        len--;
++      }
++      while(len > 0 && nowPos < outSize);
++    }
++  }
++
++  #ifdef _LZMA_OUT_READ
++  vs->RangeDecoder = rd;
++  vs->DictionaryPos = dictionaryPos;
++  vs->GlobalPos = globalPos + nowPos;
++  vs->Reps[0] = rep0;
++  vs->Reps[1] = rep1;
++  vs->Reps[2] = rep2;
++  vs->Reps[3] = rep3;
++  vs->State = state;
++  vs->PreviousIsMatch = previousIsMatch;
++  vs->RemainLen = len;
++  #endif
++
++  *outSizeProcessed = nowPos;
++  return LZMA_RESULT_OK;
++}
+diff -urN linux-2.6.21.1.old/lib/Makefile linux-2.6.21.1.dev/lib/Makefile
+--- linux-2.6.21.1.old/lib/Makefile	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/lib/Makefile	2007-05-26 19:03:45.721680152 +0200
+@@ -13,7 +13,7 @@
+ lib-y	+= kobject.o kref.o kobject_uevent.o klist.o
+ 
+ obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
+-	 bust_spinlocks.o hexdump.o
++	 bust_spinlocks.o hexdump.o LzmaDecode.o
+ 
+ ifeq ($(CONFIG_DEBUG_KOBJECT),y)
+ CFLAGS_kobject.o += -DDEBUG
+@@ -58,6 +58,7 @@
+ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+ 
+ obj-$(CONFIG_SWIOTLB) += swiotlb.o
++
+ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+ 
+ lib-$(CONFIG_GENERIC_BUG) += bug.o
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/003-squashfs_lzma.patch
@@ -1,1 +1,109 @@
+diff -urN linux-2.6.21.1.old/fs/squashfs/inode.c linux-2.6.21.1.dev/fs/squashfs/inode.c
+--- linux-2.6.21.1.old/fs/squashfs/inode.c	2007-05-26 19:03:45.499713896 +0200
++++ linux-2.6.21.1.dev/fs/squashfs/inode.c	2007-05-26 19:07:27.951896024 +0200
+@@ -4,6 +4,9 @@
+  * Copyright (c) 2002, 2003, 2004, 2005, 2006
+  * Phillip Lougher <phillip@lougher.org.uk>
+  *
++ * LZMA decompressor support added by Oleg I. Vdovikin
++ * Copyright (c) 2005 Oleg I.Vdovikin <oleg@cs.msu.su>
++ *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License
+  * as published by the Free Software Foundation; either version 2,
+@@ -21,6 +24,7 @@
+  * inode.c
+  */
+ 
++#define SQUASHFS_LZMA
+ #include <linux/types.h>
+ #include <linux/squashfs_fs.h>
+ #include <linux/module.h>
+@@ -44,6 +48,19 @@
+ 
+ #include "squashfs.h"
+ 
++#ifdef SQUASHFS_LZMA
++#include <linux/LzmaDecode.h>
++
++/* default LZMA settings, should be in sync with mksquashfs */
++#define LZMA_LC 3
++#define LZMA_LP 0
++#define LZMA_PB 2
++
++#define LZMA_WORKSPACE_SIZE ((LZMA_BASE_SIZE + \
++      (LZMA_LIT_SIZE << (LZMA_LC + LZMA_LP))) * sizeof(CProb))
++
++#endif
++
+ static void squashfs_put_super(struct super_block *);
+ static int squashfs_statfs(struct dentry *, struct kstatfs *);
+ static int squashfs_symlink_readpage(struct file *file, struct page *page);
+@@ -64,7 +81,11 @@
+ 			const char *, void *, struct vfsmount *);
+ 
+ 
++#ifdef SQUASHFS_LZMA
++static unsigned char lzma_workspace[LZMA_WORKSPACE_SIZE];
++#else
+ static z_stream stream;
++#endif
+ 
+ static struct file_system_type squashfs_fs_type = {
+ 	.owner = THIS_MODULE,
+@@ -249,6 +270,15 @@
+ 	if (compressed) {
+ 		int zlib_err;
+ 
++#ifdef SQUASHFS_LZMA
++		if ((zlib_err = LzmaDecode(lzma_workspace,
++			LZMA_WORKSPACE_SIZE, LZMA_LC, LZMA_LP, LZMA_PB,
++			c_buffer, c_byte, buffer, msblk->read_size, &bytes)) != LZMA_RESULT_OK)
++		{
++			ERROR("lzma returned unexpected result 0x%x\n", zlib_err);
++			bytes = 0;
++		}
++#else
+ 		stream.next_in = c_buffer;
+ 		stream.avail_in = c_byte;
+ 		stream.next_out = buffer;
+@@ -263,7 +293,7 @@
+ 			bytes = 0;
+ 		} else
+ 			bytes = stream.total_out;
+-
++#endif
+ 		up(&msblk->read_data_mutex);
+ 	}
+ 
+@@ -2045,15 +2075,19 @@
+ 	printk(KERN_INFO "squashfs: version 3.0 (2006/03/15) "
+ 		"Phillip Lougher\n");
+ 
++#ifndef SQUASHFS_LZMA
+ 	if (!(stream.workspace = vmalloc(zlib_inflate_workspacesize()))) {
+ 		ERROR("Failed to allocate zlib workspace\n");
+ 		destroy_inodecache();
+ 		err = -ENOMEM;
+ 		goto out;
+ 	}
++#endif
+ 
+ 	if ((err = register_filesystem(&squashfs_fs_type))) {
++#ifndef SQUASHFS_LZMA
+ 		vfree(stream.workspace);
++#endif
+ 		destroy_inodecache();
+ 	}
+ 
+@@ -2064,7 +2098,9 @@
+ 
+ static void __exit exit_squashfs_fs(void)
+ {
++#ifndef SQUASHFS_LZMA
+ 	vfree(stream.workspace);
++#endif
+ 	unregister_filesystem(&squashfs_fs_type);
+ 	destroy_inodecache();
+ }
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/004-extra_optimization.patch
@@ -1,1 +1,14 @@
+diff -urN linux-2.6.21.1.old/Makefile linux-2.6.21.1.dev/Makefile
+--- linux-2.6.21.1.old/Makefile	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/Makefile	2007-05-26 19:14:22.967804016 +0200
+@@ -507,6 +507,9 @@
+ NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
+ CHECKFLAGS     += $(NOSTDINC_FLAGS)
+ 
++# improve gcc optimization
++CFLAGS += $(call cc-option,-funit-at-a-time,)
++
+ # warn about C99 declaration after statement
+ CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
+ 
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/006-gcc4_inline_fix.patch
@@ -1,1 +1,13 @@
+diff -urN linux-2.6.21.1.old/include/asm-mips/system.h linux-2.6.21.1.dev/include/asm-mips/system.h
+--- linux-2.6.21.1.old/include/asm-mips/system.h	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/include/asm-mips/system.h	2007-05-26 19:26:30.870146040 +0200
+@@ -188,7 +188,7 @@
+    if something tries to do an invalid xchg().  */
+ extern void __xchg_called_with_bad_pointer(void);
+ 
+-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
++static __always_inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+ {
+ 	switch (size) {
+ 	case 4:
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/007-samsung_flash.patch
@@ -1,1 +1,38 @@
+diff -urN linux-2.6.21.1.old/drivers/mtd/chips/cfi_cmdset_0002.c linux-2.6.21.1.dev/drivers/mtd/chips/cfi_cmdset_0002.c
+--- linux-2.6.21.1.old/drivers/mtd/chips/cfi_cmdset_0002.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/drivers/mtd/chips/cfi_cmdset_0002.c	2007-05-26 19:30:01.049193968 +0200
+@@ -51,6 +51,7 @@
+ #define SST49LF040B	        0x0050
+ #define SST49LF008A		0x005a
+ #define AT49BV6416		0x00d6
++#define MANUFACTURER_SAMSUNG	0x00ec
+ 
+ static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+ static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+@@ -294,12 +295,19 @@
+ 
+ 		if (extp->MajorVersion != '1' ||
+ 		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+-			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
+-			       "version %c.%c.\n",  extp->MajorVersion,
+-			       extp->MinorVersion);
+-			kfree(extp);
+-			kfree(mtd);
+-			return NULL;
++		        if (cfi->mfr == MANUFACTURER_SAMSUNG &&
++			    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
++			    printk(KERN_NOTICE "  Newer Samsung flash detected, "
++			           "should be compatibile with Amd/Fujitsu.\n");
++		        }
++		        else {
++			    printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
++			           "version %c.%c.\n",  extp->MajorVersion,
++			           extp->MinorVersion);
++			    kfree(extp);
++			    kfree(mtd);
++			    return NULL;
++		        }
+ 		}
+ 
+ 		/* Install our own private info structure */
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/009-revert_intel_flash_breakage.patch
@@ -1,1 +1,171 @@
+diff -urN linux-2.6.21.1.old/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.21.1.dev/drivers/mtd/chips/cfi_cmdset_0001.c
+--- linux-2.6.21.1.old/drivers/mtd/chips/cfi_cmdset_0001.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/drivers/mtd/chips/cfi_cmdset_0001.c	2007-05-26 19:40:46.809023552 +0200
+@@ -933,7 +933,7 @@
+ 
+ static int __xipram xip_wait_for_operation(
+ 		struct map_info *map, struct flchip *chip,
+-		unsigned long adr, unsigned int chip_op_time )
++		unsigned long adr, int *chip_op_time )
+ {
+ 	struct cfi_private *cfi = map->fldrv_priv;
+ 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+@@ -942,7 +942,7 @@
+ 	flstate_t oldstate, newstate;
+ 
+        	start = xip_currtime();
+-	usec = chip_op_time * 8;
++	usec = *chip_op_time * 8;
+ 	if (usec == 0)
+ 		usec = 500000;
+ 	done = 0;
+@@ -1052,8 +1052,8 @@
+ #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
+ 	INVALIDATE_CACHED_RANGE(map, from, size)
+ 
+-#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
+-	xip_wait_for_operation(map, chip, cmd_adr, usec)
++#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
++	xip_wait_for_operation(map, chip, cmd_adr, p_usec)
+ 
+ #else
+ 
+@@ -1065,65 +1065,65 @@
+ static int inval_cache_and_wait_for_operation(
+ 		struct map_info *map, struct flchip *chip,
+ 		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
+-		unsigned int chip_op_time)
++		int *chip_op_time )
+ {
+ 	struct cfi_private *cfi = map->fldrv_priv;
+ 	map_word status, status_OK = CMD(0x80);
+-	int chip_state = chip->state;
+-	unsigned int timeo, sleep_time;
++	int z, chip_state = chip->state;
++	unsigned long timeo;
+ 
+ 	spin_unlock(chip->mutex);
+ 	if (inval_len)
+ 		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
++	if (*chip_op_time)
++		cfi_udelay(*chip_op_time);
+ 	spin_lock(chip->mutex);
+ 
+-	/* set our timeout to 8 times the expected delay */
+-	timeo = chip_op_time * 8;
+-	if (!timeo)
+-		timeo = 500000;
+-	sleep_time = chip_op_time / 2;
++	timeo = *chip_op_time * 8 * HZ / 1000000;
++	if (timeo < HZ/2)
++		timeo = HZ/2;
++	timeo += jiffies;
+ 
++	z = 0;
+ 	for (;;) {
++		if (chip->state != chip_state) {
++			/* Someone's suspended the operation: sleep */
++			DECLARE_WAITQUEUE(wait, current);
++
++			set_current_state(TASK_UNINTERRUPTIBLE);
++			add_wait_queue(&chip->wq, &wait);
++			spin_unlock(chip->mutex);
++			schedule();
++			remove_wait_queue(&chip->wq, &wait);
++			timeo = jiffies + (HZ / 2); /* FIXME */
++			spin_lock(chip->mutex);
++			continue;
++		}
++
+ 		status = map_read(map, cmd_adr);
+ 		if (map_word_andequal(map, status, status_OK, status_OK))
+ 			break;
+ 
+-		if (!timeo) {
++		/* OK Still waiting */
++		if (time_after(jiffies, timeo)) {
+ 			map_write(map, CMD(0x70), cmd_adr);
+ 			chip->state = FL_STATUS;
+ 			return -ETIME;
+ 		}
+ 
+-		/* OK Still waiting. Drop the lock, wait a while and retry. */
++		/* Latency issues. Drop the lock, wait a while and retry */
++		z++;
+ 		spin_unlock(chip->mutex);
+-		if (sleep_time >= 1000000/HZ) {
+-			/*
+-			 * Half of the normal delay still remaining
+-			 * can be performed with a sleeping delay instead
+-			 * of busy waiting.
+-			 */
+-			msleep(sleep_time/1000);
+-			timeo -= sleep_time;
+-			sleep_time = 1000000/HZ;
+-		} else {
+-			udelay(1);
+-			cond_resched();
+-			timeo--;
+-		}
++		cfi_udelay(1);
+ 		spin_lock(chip->mutex);
+-
+-		while (chip->state != chip_state) {
+-			/* Someone's suspended the operation: sleep */
+-			DECLARE_WAITQUEUE(wait, current);
+-			set_current_state(TASK_UNINTERRUPTIBLE);
+-			add_wait_queue(&chip->wq, &wait);
+-			spin_unlock(chip->mutex);
+-			schedule();
+-			remove_wait_queue(&chip->wq, &wait);
+-			spin_lock(chip->mutex);
+-		}
+ 	}
+ 
++	if (!z) {
++		if (!--(*chip_op_time))
++			*chip_op_time = 1;
++	} else if (z > 1)
++		++(*chip_op_time);
++
+ 	/* Done and happy. */
+  	chip->state = FL_STATUS;
+ 	return 0;
+@@ -1132,7 +1132,8 @@
+ #endif
+ 
+ #define WAIT_TIMEOUT(map, chip, adr, udelay) \
+-	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
++	({ int __udelay = (udelay); \
++	   INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
+ 
+ 
+ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
+@@ -1356,7 +1357,7 @@
+ 
+ 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+ 				   adr, map_bankwidth(map),
+-				   chip->word_write_time);
++				   &chip->word_write_time);
+ 	if (ret) {
+ 		xip_enable(map, chip, adr);
+ 		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
+@@ -1593,7 +1594,7 @@
+ 
+ 	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
+ 				   adr, len,
+-				   chip->buffer_write_time);
++				   &chip->buffer_write_time);
+ 	if (ret) {
+ 		map_write(map, CMD(0x70), cmd_adr);
+ 		chip->state = FL_STATUS;
+@@ -1728,7 +1729,7 @@
+ 
+ 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+ 				   adr, len,
+-				   chip->erase_time);
++				   &chip->erase_time);
+ 	if (ret) {
+ 		map_write(map, CMD(0x70), adr);
+ 		chip->state = FL_STATUS;
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/010-disable_old_squashfs_compatibility.patch
@@ -1,1 +1,22 @@
+diff -urN linux-2.6.21.1.old/fs/squashfs/Makefile linux-2.6.21.1.dev/fs/squashfs/Makefile
+--- linux-2.6.21.1.old/fs/squashfs/Makefile	2007-05-26 19:03:45.499713896 +0200
++++ linux-2.6.21.1.dev/fs/squashfs/Makefile	2007-05-26 19:43:37.064140840 +0200
+@@ -4,4 +4,3 @@
+ 
+ obj-$(CONFIG_SQUASHFS) += squashfs.o
+ squashfs-y += inode.o
+-squashfs-y += squashfs2_0.o
+diff -urN linux-2.6.21.1.old/fs/squashfs/squashfs.h linux-2.6.21.1.dev/fs/squashfs/squashfs.h
+--- linux-2.6.21.1.old/fs/squashfs/squashfs.h	2007-05-26 19:03:45.500713744 +0200
++++ linux-2.6.21.1.dev/fs/squashfs/squashfs.h	2007-05-26 19:43:37.075139168 +0200
+@@ -24,6 +24,9 @@
+ #ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
+ #undef CONFIG_SQUASHFS_1_0_COMPATIBILITY
+ #endif
++#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
++#undef CONFIG_SQUASHFS_2_0_COMPATIBILITY
++#endif
+ 
+ #ifdef SQUASHFS_TRACE
+ #define TRACE(s, args...)	printk(KERN_NOTICE "SQUASHFS: "s, ## args)
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/011-mips_boot.patch
@@ -1,1 +1,21 @@
+diff -urN linux-2.6.21.1.old/arch/mips/kernel/head.S linux-2.6.21.1.dev/arch/mips/kernel/head.S
+--- linux-2.6.21.1.old/arch/mips/kernel/head.S	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/arch/mips/kernel/head.S	2007-05-26 19:46:49.061952736 +0200
+@@ -129,11 +129,15 @@
+ #endif
+ 	.endm
+ 
++
++	j kernel_entry
++	nop
++
+ 	/*
+ 	 * Reserved space for exception handlers.
+ 	 * Necessary for machines which link their kernels at KSEG0.
+ 	 */
+-	.fill	0x400
++	.align 10
+ 
+ EXPORT(stext)					# used for profiling
+ EXPORT(_stext)
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/012-mips_cpu_tlb.patch
@@ -1,1 +1,20 @@
+diff -urN linux-2.6.21.1.old/arch/mips/mm/tlbex.c linux-2.6.21.1.dev/arch/mips/mm/tlbex.c
+--- linux-2.6.21.1.old/arch/mips/mm/tlbex.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/arch/mips/mm/tlbex.c	2007-05-26 19:50:19.046030304 +0200
+@@ -887,7 +887,6 @@
+ 	case CPU_R10000:
+ 	case CPU_R12000:
+ 	case CPU_R14000:
+-	case CPU_4KC:
+ 	case CPU_SB1:
+ 	case CPU_SB1A:
+ 	case CPU_4KSC:
+@@ -915,6 +914,7 @@
+ 		tlbw(p);
+ 		break;
+ 
++	case CPU_4KC:
+ 	case CPU_4KEC:
+ 	case CPU_24K:
+ 	case CPU_34K:
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/013-mips_generic_gpio_support.patch
@@ -1,1 +1,36 @@
+diff -urN linux-2.6.21.1.old/arch/mips/defconfig linux-2.6.21.1.dev/arch/mips/defconfig
+--- linux-2.6.21.1.old/arch/mips/defconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/arch/mips/defconfig	2007-05-26 19:55:17.542651920 +0200
+@@ -69,6 +69,7 @@
+ CONFIG_GENERIC_HWEIGHT=y
+ CONFIG_GENERIC_CALIBRATE_DELAY=y
+ CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_GPIO=n
+ CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+ # CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
+ CONFIG_ARC=y
+diff -urN linux-2.6.21.1.old/arch/mips/Kconfig linux-2.6.21.1.dev/arch/mips/Kconfig
+--- linux-2.6.21.1.old/arch/mips/Kconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/arch/mips/Kconfig	2007-05-26 19:55:17.500658304 +0200
+@@ -704,6 +704,10 @@
+ 	bool
+ 	default y
+ 
++config GENERIC_GPIO
++	bool
++	default n
++
+ config SCHED_NO_NO_OMIT_FRAME_POINTER
+ 	bool
+ 	default y
+diff -urN linux-2.6.21.1.old/include/asm-mips/gpio.h linux-2.6.21.1.dev/include/asm-mips/gpio.h
+--- linux-2.6.21.1.old/include/asm-mips/gpio.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/asm-mips/gpio.h	2007-05-26 19:55:17.500658304 +0200
+@@ -0,0 +1,6 @@
++#ifndef _ASM_MIPS_GPIO_H
++#define _ASM_MIPS_GPIO_H
++
++#include <gpio.h>
++
++#endif /* _ASM_MIPS_GPIO_H */
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/060-rootfs_split.patch
@@ -1,1 +1,411 @@
+diff -urN linux-2.6.21.1.old/drivers/mtd/Kconfig linux-2.6.21.1.dev/drivers/mtd/Kconfig
+--- linux-2.6.21.1.old/drivers/mtd/Kconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/drivers/mtd/Kconfig	2007-05-26 19:58:42.320520952 +0200
+@@ -47,6 +47,11 @@
+ 	  devices. Partitioning on NFTL 'devices' is a different - that's the
+ 	  'normal' form of partitioning used on a block device.
+ 
++config MTD_SPLIT_ROOTFS
++	bool "Automatically split rootfs partition for squashfs"
++	depends on MTD_PARTITIONS
++	default y
++
+ config MTD_REDBOOT_PARTS
+ 	tristate "RedBoot partition table parsing"
+ 	depends on MTD_PARTITIONS
+diff -urN linux-2.6.21.1.old/drivers/mtd/mtdpart.c linux-2.6.21.1.dev/drivers/mtd/mtdpart.c
+--- linux-2.6.21.1.old/drivers/mtd/mtdpart.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/drivers/mtd/mtdpart.c	2007-05-26 19:58:42.331519280 +0200
+@@ -20,6 +20,8 @@
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
+ #include <linux/mtd/compatmac.h>
++#include <linux/squashfs_fs.h>
++#include <linux/root_dev.h>
+ 
+ /* Our partition linked list */
+ static LIST_HEAD(mtd_partitions);
+@@ -308,6 +310,171 @@
+ 	return 0;
+ }
+ 
++static u_int32_t cur_offset = 0;
++static int add_mtd_partition(struct mtd_info *master, const struct mtd_partition *part, int i)
++{
++	struct mtd_part *slave;
++	
++	/* allocate the partition structure */
++	slave = kzalloc (sizeof(*slave), GFP_KERNEL);
++	if (!slave) {
++		printk ("memory allocation error while creating partitions for \"%s\"\n",
++			master->name);
++		del_mtd_partitions(master);
++		return -ENOMEM;
++	}
++	list_add(&slave->list, &mtd_partitions);
++
++	/* set up the MTD object for this partition */
++	slave->mtd.type = master->type;
++	slave->mtd.flags = master->flags & ~part->mask_flags;
++	slave->mtd.size = part->size;
++	slave->mtd.writesize = master->writesize;
++	slave->mtd.oobsize = master->oobsize;
++	slave->mtd.oobavail = master->oobavail;
++	slave->mtd.subpage_sft = master->subpage_sft;
++
++	slave->mtd.name = part->name;
++	slave->mtd.owner = master->owner;
++
++	slave->mtd.read = part_read;
++	slave->mtd.write = part_write;
++
++	if(master->point && master->unpoint){
++		slave->mtd.point = part_point;
++		slave->mtd.unpoint = part_unpoint;
++	}
++
++	if (master->read_oob)
++		slave->mtd.read_oob = part_read_oob;
++	if (master->write_oob)
++		slave->mtd.write_oob = part_write_oob;
++	if(master->read_user_prot_reg)
++		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
++	if(master->read_fact_prot_reg)
++		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
++	if(master->write_user_prot_reg)
++		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
++	if(master->lock_user_prot_reg)
++		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
++	if(master->get_user_prot_info)
++		slave->mtd.get_user_prot_info = part_get_user_prot_info;
++	if(master->get_fact_prot_info)
++		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
++	if (master->sync)
++		slave->mtd.sync = part_sync;
++	if (!i && master->suspend && master->resume) {
++			slave->mtd.suspend = part_suspend;
++			slave->mtd.resume = part_resume;
++	}
++	if (master->writev)
++		slave->mtd.writev = part_writev;
++	if (master->lock)
++		slave->mtd.lock = part_lock;
++	if (master->unlock)
++		slave->mtd.unlock = part_unlock;
++	if (master->block_isbad)
++		slave->mtd.block_isbad = part_block_isbad;
++	if (master->block_markbad)
++		slave->mtd.block_markbad = part_block_markbad;
++	slave->mtd.erase = part_erase;
++	slave->master = master;
++	slave->offset = part->offset;
++	slave->index = i;
++
++	if (slave->offset == MTDPART_OFS_APPEND)
++		slave->offset = cur_offset;
++	if (slave->offset == MTDPART_OFS_NXTBLK) {
++		slave->offset = cur_offset;
++		if ((cur_offset % master->erasesize) != 0) {
++			/* Round up to next erasesize */
++			slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
++			printk(KERN_NOTICE "Moving partition %d: "
++			       "0x%08x -> 0x%08x\n", i,
++			       cur_offset, slave->offset);
++		}
++	}
++	if (slave->mtd.size == MTDPART_SIZ_FULL)
++		slave->mtd.size = master->size - slave->offset;
++	cur_offset = slave->offset + slave->mtd.size;
++
++	printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
++		slave->offset + slave->mtd.size, slave->mtd.name);
++
++	/* let's do some sanity checks */
++	if (slave->offset >= master->size) {
++			/* let's register it anyway to preserve ordering */
++		slave->offset = 0;
++		slave->mtd.size = 0;
++		printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
++			part->name);
++	}
++	if (slave->offset + slave->mtd.size > master->size) {
++		slave->mtd.size = master->size - slave->offset;
++		printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
++			part->name, master->name, slave->mtd.size);
++	}
++	if (master->numeraseregions>1) {
++		/* Deal with variable erase size stuff */
++		int i;
++		struct mtd_erase_region_info *regions = master->eraseregions;
++
++		/* Find the first erase regions which is part of this partition. */
++		for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
++			;
++
++		for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
++			if (slave->mtd.erasesize < regions[i].erasesize) {
++				slave->mtd.erasesize = regions[i].erasesize;
++			}
++		}
++	} else {
++		/* Single erase size */
++		slave->mtd.erasesize = master->erasesize;
++	}
++
++	if ((slave->mtd.flags & MTD_WRITEABLE) &&
++	    (slave->offset % slave->mtd.erasesize)) {
++		/* Doesn't start on a boundary of major erase size */
++		/* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
++		slave->mtd.flags &= ~MTD_WRITEABLE;
++		printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
++			part->name);
++	}
++	if ((slave->mtd.flags & MTD_WRITEABLE) &&
++	    (slave->mtd.size % slave->mtd.erasesize)) {
++		slave->mtd.flags &= ~MTD_WRITEABLE;
++		printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
++			part->name);
++	}
++
++	slave->mtd.ecclayout = master->ecclayout;
++	if (master->block_isbad) {
++		uint32_t offs = 0;
++
++		while(offs < slave->mtd.size) {
++			if (master->block_isbad(master,
++						offs + slave->offset))
++				slave->mtd.ecc_stats.badblocks++;
++			offs += slave->mtd.erasesize;
++		}
++	}
++
++	if(part->mtdp)
++	{	/* store the object pointer (caller may or may not register it */
++		*part->mtdp = &slave->mtd;
++		slave->registered = 0;
++	}
++	else
++	{
++		/* register our partition */
++		add_mtd_device(&slave->mtd);
++		slave->registered = 1;
++	}
++
++	return 0;
++}
++
+ /*
+  * This function, given a master MTD object and a partition table, creates
+  * and registers slave MTD objects which are bound to the master according to
+@@ -319,169 +487,53 @@
+ 		       const struct mtd_partition *parts,
+ 		       int nbparts)
+ {
+-	struct mtd_part *slave;
+-	u_int32_t cur_offset = 0;
+-	int i;
++	struct mtd_partition *part;
++	int i, ret = 0;
+ 
+ 	printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+ 
+ 	for (i = 0; i < nbparts; i++) {
++		part = (struct mtd_partition *) &parts[i];
++		ret = add_mtd_partition(master, part, i);
++		if (ret)
++			return ret;
++		if (strcmp(part->name, "rootfs") == 0) {
++#ifdef CONFIG_MTD_SPLIT_ROOTFS
++			int len;
++			char buf[512];
++			struct squashfs_super_block *sb = (struct squashfs_super_block *) buf;
++#define ROOTFS_SPLIT_NAME "rootfs_data"
++			if ((master->read(master, part->offset, sizeof(struct squashfs_super_block), &len, buf) == 0) &&
++					(len == sizeof(struct squashfs_super_block)) &&
++					(*((u32 *) buf) == SQUASHFS_MAGIC) &&
++					(sb->bytes_used > 0)) {
++
++				
++				part = kmalloc(sizeof(struct mtd_partition), GFP_KERNEL);
++				memcpy(part, &parts[i], sizeof(struct mtd_partition));
++				
++				part->name = kmalloc(sizeof(ROOTFS_SPLIT_NAME) + 1, GFP_KERNEL);
++				strcpy(part->name, ROOTFS_SPLIT_NAME);
++
++				len = (u32) sb->bytes_used;
++				len += (part->offset & 0x000fffff);
++				len +=  (master->erasesize - 1);
++				len &= ~(master->erasesize - 1);
++				len -= (part->offset & 0x000fffff);
++				part->offset += len;
++				part->size -= len;
++			
++				if (master->erasesize <= part->size)
++					ret = add_mtd_partition(master, part, i + 1);
++				else
++					kfree(part->name);
++				if (ret)
++					return ret;
+ 
+-		/* allocate the partition structure */
+-		slave = kzalloc (sizeof(*slave), GFP_KERNEL);
+-		if (!slave) {
+-			printk ("memory allocation error while creating partitions for \"%s\"\n",
+-				master->name);
+-			del_mtd_partitions(master);
+-			return -ENOMEM;
+-		}
+-		list_add(&slave->list, &mtd_partitions);
+-
+-		/* set up the MTD object for this partition */
+-		slave->mtd.type = master->type;
+-		slave->mtd.flags = master->flags & ~parts[i].mask_flags;
+-		slave->mtd.size = parts[i].size;
+-		slave->mtd.writesize = master->writesize;
+-		slave->mtd.oobsize = master->oobsize;
+-		slave->mtd.oobavail = master->oobavail;
+-		slave->mtd.subpage_sft = master->subpage_sft;
+-
+-		slave->mtd.name = parts[i].name;
+-		slave->mtd.owner = master->owner;
+-
+-		slave->mtd.read = part_read;
+-		slave->mtd.write = part_write;
+-
+-		if(master->point && master->unpoint){
+-			slave->mtd.point = part_point;
+-			slave->mtd.unpoint = part_unpoint;
+-		}
+-
+-		if (master->read_oob)
+-			slave->mtd.read_oob = part_read_oob;
+-		if (master->write_oob)
+-			slave->mtd.write_oob = part_write_oob;
+-		if(master->read_user_prot_reg)
+-			slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
+-		if(master->read_fact_prot_reg)
+-			slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
+-		if(master->write_user_prot_reg)
+-			slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
+-		if(master->lock_user_prot_reg)
+-			slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
+-		if(master->get_user_prot_info)
+-			slave->mtd.get_user_prot_info = part_get_user_prot_info;
+-		if(master->get_fact_prot_info)
+-			slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
+-		if (master->sync)
+-			slave->mtd.sync = part_sync;
+-		if (!i && master->suspend && master->resume) {
+-				slave->mtd.suspend = part_suspend;
+-				slave->mtd.resume = part_resume;
+-		}
+-		if (master->writev)
+-			slave->mtd.writev = part_writev;
+-		if (master->lock)
+-			slave->mtd.lock = part_lock;
+-		if (master->unlock)
+-			slave->mtd.unlock = part_unlock;
+-		if (master->block_isbad)
+-			slave->mtd.block_isbad = part_block_isbad;
+-		if (master->block_markbad)
+-			slave->mtd.block_markbad = part_block_markbad;
+-		slave->mtd.erase = part_erase;
+-		slave->master = master;
+-		slave->offset = parts[i].offset;
+-		slave->index = i;
+-
+-		if (slave->offset == MTDPART_OFS_APPEND)
+-			slave->offset = cur_offset;
+-		if (slave->offset == MTDPART_OFS_NXTBLK) {
+-			slave->offset = cur_offset;
+-			if ((cur_offset % master->erasesize) != 0) {
+-				/* Round up to next erasesize */
+-				slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
+-				printk(KERN_NOTICE "Moving partition %d: "
+-				       "0x%08x -> 0x%08x\n", i,
+-				       cur_offset, slave->offset);
++				kfree(part);
+ 			}
+-		}
+-		if (slave->mtd.size == MTDPART_SIZ_FULL)
+-			slave->mtd.size = master->size - slave->offset;
+-		cur_offset = slave->offset + slave->mtd.size;
+-
+-		printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
+-			slave->offset + slave->mtd.size, slave->mtd.name);
+-
+-		/* let's do some sanity checks */
+-		if (slave->offset >= master->size) {
+-				/* let's register it anyway to preserve ordering */
+-			slave->offset = 0;
+-			slave->mtd.size = 0;
+-			printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
+-				parts[i].name);
+-		}
+-		if (slave->offset + slave->mtd.size > master->size) {
+-			slave->mtd.size = master->size - slave->offset;
+-			printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
+-				parts[i].name, master->name, slave->mtd.size);
+-		}
+-		if (master->numeraseregions>1) {
+-			/* Deal with variable erase size stuff */
+-			int i;
+-			struct mtd_erase_region_info *regions = master->eraseregions;
+-
+-			/* Find the first erase regions which is part of this partition. */
+-			for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
+-				;
+-
+-			for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
+-				if (slave->mtd.erasesize < regions[i].erasesize) {
+-					slave->mtd.erasesize = regions[i].erasesize;
+-				}
+-			}
+-		} else {
+-			/* Single erase size */
+-			slave->mtd.erasesize = master->erasesize;
+-		}
+-
+-		if ((slave->mtd.flags & MTD_WRITEABLE) &&
+-		    (slave->offset % slave->mtd.erasesize)) {
+-			/* Doesn't start on a boundary of major erase size */
+-			/* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
+-			slave->mtd.flags &= ~MTD_WRITEABLE;
+-			printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+-				parts[i].name);
+-		}
+-		if ((slave->mtd.flags & MTD_WRITEABLE) &&
+-		    (slave->mtd.size % slave->mtd.erasesize)) {
+-			slave->mtd.flags &= ~MTD_WRITEABLE;
+-			printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+-				parts[i].name);
+-		}
+-
+-		slave->mtd.ecclayout = master->ecclayout;
+-		if (master->block_isbad) {
+-			uint32_t offs = 0;
+-
+-			while(offs < slave->mtd.size) {
+-				if (master->block_isbad(master,
+-							offs + slave->offset))
+-					slave->mtd.ecc_stats.badblocks++;
+-				offs += slave->mtd.erasesize;
+-			}
+-		}
+-
+-		if(parts[i].mtdp)
+-		{	/* store the object pointer (caller may or may not register it */
+-			*parts[i].mtdp = &slave->mtd;
+-			slave->registered = 0;
+-		}
+-		else
+-		{
+-			/* register our partition */
+-			add_mtd_device(&slave->mtd);
+-			slave->registered = 1;
++#endif /* CONFIG_MTD_SPLIT_ROOTFS */
++			ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, i);
+ 		}
+ 	}
+ 
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/065-block2mtd_init.patch
@@ -1,1 +1,114 @@
+diff -urN linux-2.6.21.1.old/drivers/mtd/devices/block2mtd.c linux-2.6.21.1.dev/drivers/mtd/devices/block2mtd.c
+--- linux-2.6.21.1.old/drivers/mtd/devices/block2mtd.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/drivers/mtd/devices/block2mtd.c	2007-05-26 20:06:13.547923960 +0200
+@@ -16,6 +16,7 @@
+ #include <linux/list.h>
+ #include <linux/init.h>
+ #include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
+ #include <linux/buffer_head.h>
+ #include <linux/mutex.h>
+ #include <linux/mount.h>
+@@ -237,10 +238,11 @@
+ 
+ 
+ /* FIXME: ensure that mtd->size % erase_size == 0 */
+-static struct block2mtd_dev *add_device(char *devname, int erase_size)
++static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
+ {
+ 	struct block_device *bdev;
+ 	struct block2mtd_dev *dev;
++	struct mtd_partition *part;
+ 
+ 	if (!devname)
+ 		return NULL;
+@@ -279,14 +281,18 @@
+ 
+ 	/* Setup the MTD structure */
+ 	/* make the name contain the block device in */
+-	dev->mtd.name = kmalloc(sizeof("block2mtd: ") + strlen(devname),
+-			GFP_KERNEL);
++
++	if (!mtdname)
++		mtdname = devname;
++
++	dev->mtd.name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL);
++
+ 	if (!dev->mtd.name)
+ 		goto devinit_err;
++	
++	strcpy(dev->mtd.name, mtdname);
+ 
+-	sprintf(dev->mtd.name, "block2mtd: %s", devname);
+-
+-	dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
++	dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
+ 	dev->mtd.erasesize = erase_size;
+ 	dev->mtd.writesize = 1;
+ 	dev->mtd.type = MTD_RAM;
+@@ -298,15 +304,18 @@
+ 	dev->mtd.read = block2mtd_read;
+ 	dev->mtd.priv = dev;
+ 	dev->mtd.owner = THIS_MODULE;
+-
+-	if (add_mtd_device(&dev->mtd)) {
++	
++	part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
++	part->name = dev->mtd.name;
++	part->offset = 0;
++	part->size = dev->mtd.size;
++	if (add_mtd_partitions(&dev->mtd, part, 1)) {
+ 		/* Device didnt get added, so free the entry */
+ 		goto devinit_err;
+ 	}
+ 	list_add(&dev->list, &blkmtd_device_list);
+ 	INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
+-			dev->mtd.name + strlen("blkmtd: "),
+-			dev->mtd.erasesize >> 10, dev->mtd.erasesize);
++			mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ 	return dev;
+ 
+ devinit_err:
+@@ -379,9 +388,9 @@
+ 
+ static int block2mtd_setup2(const char *val)
+ {
+-	char buf[80 + 12]; /* 80 for device, 12 for erase size */
++	char buf[80 + 12 + 80]; /* 80 for device, 12 for erase size, 80 for name */
+ 	char *str = buf;
+-	char *token[2];
++	char *token[3];
+ 	char *name;
+ 	size_t erase_size = PAGE_SIZE;
+ 	int i, ret;
+@@ -392,7 +401,7 @@
+ 	strcpy(str, val);
+ 	kill_final_newline(str);
+ 
+-	for (i = 0; i < 2; i++)
++	for (i = 0; i < 3; i++)
+ 		token[i] = strsep(&str, ",");
+ 
+ 	if (str)
+@@ -412,8 +421,10 @@
+ 			parse_err("illegal erase size");
+ 		}
+ 	}
++	if (token[2] && (strlen(token[2]) + 1 > 80))
++		parse_err("mtd device name too long");
+ 
+-	add_device(name, erase_size);
++	add_device(name, erase_size, token[2]);
+ 
+ 	return 0;
+ }
+@@ -447,7 +458,7 @@
+ 
+ 
+ module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
+-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
++MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
+ 
+ static int __init block2mtd_init(void)
+ {
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/100-netfilter_layer7_2.8.patch
@@ -1,1 +1,2035 @@
+diff -urN linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_layer7.h linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_layer7.h
+--- linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_layer7.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_layer7.h	2007-05-26 20:13:52.648130120 +0200
+@@ -0,0 +1,26 @@
++/*
++  By Matthew Strait <quadong@users.sf.net>, Dec 2003.
++  http://l7-filter.sf.net
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version
++  2 of the License, or (at your option) any later version.
++  http://www.gnu.org/licenses/gpl.txt
++*/
++
++#ifndef _IPT_LAYER7_H
++#define _IPT_LAYER7_H
++
++#define MAX_PATTERN_LEN 8192
++#define MAX_PROTOCOL_LEN 256
++
++typedef char *(*proc_ipt_search) (char *, char, char *);
++
++struct ipt_layer7_info {
++    char protocol[MAX_PROTOCOL_LEN];
++    char invert:1;
++    char pattern[MAX_PATTERN_LEN];
++};
++
++#endif /* _IPT_LAYER7_H */
+diff -urN linux-2.6.21.1.old/net/netfilter/nf_conntrack_core.c linux-2.6.21.1.dev/net/netfilter/nf_conntrack_core.c
+--- linux-2.6.21.1.old/net/netfilter/nf_conntrack_core.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/netfilter/nf_conntrack_core.c	2007-05-26 20:13:52.649129968 +0200
+@@ -330,6 +330,13 @@
+ 	 * too. */
+ 	nf_ct_remove_expectations(ct);
+ 
++	#if defined(CONFIG_IP_NF_MATCH_LAYER7) || defined(CONFIG_IP_NF_MATCH_LAYER7_MODULE)
++	if(ct->layer7.app_proto)
++		kfree(ct->layer7.app_proto);
++	if(ct->layer7.app_data)
++		kfree(ct->layer7.app_data);
++	#endif
++
+ 	/* We overload first tuple to link into unconfirmed list. */
+ 	if (!nf_ct_is_confirmed(ct)) {
+ 		BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list));
+diff -urN linux-2.6.21.1.old/net/netfilter/nf_conntrack_standalone.c linux-2.6.21.1.dev/net/netfilter/nf_conntrack_standalone.c
+--- linux-2.6.21.1.old/net/netfilter/nf_conntrack_standalone.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/netfilter/nf_conntrack_standalone.c	2007-05-26 20:13:52.649129968 +0200
+@@ -184,6 +184,12 @@
+ 		return -ENOSPC;
+ #endif
+ 
++#if defined(CONFIG_IP_NF_MATCH_LAYER7) || defined(CONFIG_IP_NF_MATCH_LAYER7_MODULE)
++	if(conntrack->layer7.app_proto)
++		if (seq_printf(s, "l7proto=%s ",conntrack->layer7.app_proto))
++			return 1;
++#endif
++
+ 	if (seq_printf(s, "use=%u\n", atomic_read(&conntrack->ct_general.use)))
+ 		return -ENOSPC;
+ 	
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/ipt_layer7.c linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_layer7.c
+--- linux-2.6.21.1.old/net/ipv4/netfilter/ipt_layer7.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_layer7.c	2007-05-26 20:13:52.650129816 +0200
+@@ -0,0 +1,573 @@
++/*
++  Kernel module to match application layer (OSI layer 7) data in connections.
++
++  http://l7-filter.sf.net
++
++  By Matthew Strait and Ethan Sommer, 2003-2006.
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version
++  2 of the License, or (at your option) any later version.
++  http://www.gnu.org/licenses/gpl.txt
++
++  Based on ipt_string.c (C) 2000 Emmanuel Roger <winfield@freegates.be>
++  and cls_layer7.c (C) 2003 Matthew Strait, Ethan Sommer, Justin Levandoski
++*/
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_conntrack.h>
++#include <linux/proc_fs.h>
++#include <linux/ctype.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <linux/spinlock.h>
++
++#include "regexp/regexp.c"
++
++#include <linux/netfilter_ipv4/ipt_layer7.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++
++MODULE_AUTHOR("Matthew Strait <quadong@users.sf.net>, Ethan Sommer <sommere@users.sf.net>");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("iptables application layer match module");
++MODULE_VERSION("2.0");
++
++static int maxdatalen = 2048; // this is the default
++module_param(maxdatalen, int, 0444);
++MODULE_PARM_DESC(maxdatalen, "maximum bytes of data looked at by l7-filter");
++
++#ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
++	#define DPRINTK(format,args...) printk(format,##args)
++#else
++	#define DPRINTK(format,args...)
++#endif
++
++#define TOTAL_PACKETS master_conntrack->counters[IP_CT_DIR_ORIGINAL].packets + \
++		      master_conntrack->counters[IP_CT_DIR_REPLY].packets
++
++/* Number of packets whose data we look at.
++This can be modified through /proc/net/layer7_numpackets */
++static int num_packets = 10;
++
++static struct pattern_cache {
++	char * regex_string;
++	regexp * pattern;
++	struct pattern_cache * next;
++} * first_pattern_cache = NULL;
++
++/* I'm new to locking.  Here are my assumptions:
++
++- No one will write to /proc/net/layer7_numpackets over and over very fast;
++  if they did, nothing awful would happen.
++
++- This code will never be processing the same packet twice at the same time,
++  because iptables rules are traversed in order.
++
++- It doesn't matter if two packets from different connections are in here at
++  the same time, because they don't share any data.
++
++- It _does_ matter if two packets from the same connection (or one from a
++  master and one from its child) are here at the same time.  In this case,
++  we have to protect the conntracks and the list of compiled patterns.
++*/
++DEFINE_RWLOCK(ct_lock);
++DEFINE_SPINLOCK(list_lock);
++
++#ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
++/* Converts an unfriendly string into a friendly one by
++replacing unprintables with periods and all whitespace with " ". */
++static char * friendly_print(unsigned char * s)
++{
++	char * f = kmalloc(strlen(s) + 1, GFP_ATOMIC);
++	int i;
++
++	if(!f) {
++		if (net_ratelimit())
++			printk(KERN_ERR "layer7: out of memory in friendly_print, bailing.\n");
++		return NULL;
++	}
++
++	for(i = 0; i < strlen(s); i++){
++		if(isprint(s[i]) && s[i] < 128)	f[i] = s[i];
++		else if(isspace(s[i]))		f[i] = ' ';
++		else 				f[i] = '.';
++	}
++	f[i] = '\0';
++	return f;
++}
++
++static char dec2hex(int i)
++{
++	switch (i) {
++		case 0 ... 9:
++			return (char)(i + '0');
++			break;
++		case 10 ... 15:
++			return (char)(i - 10 + 'a');
++			break;
++		default:
++			if (net_ratelimit())
++				printk("Problem in dec2hex\n");
++			return '\0';
++	}
++}
++
++static char * hex_print(unsigned char * s)
++{
++	char * g = kmalloc(strlen(s)*3 + 1, GFP_ATOMIC);
++	int i;
++
++	if(!g) {
++	       if (net_ratelimit())
++			printk(KERN_ERR "layer7: out of memory in hex_print, bailing.\n");
++	       return NULL;
++	}
++
++	for(i = 0; i < strlen(s); i++) {
++		g[i*3    ] = dec2hex(s[i]/16);
++		g[i*3 + 1] = dec2hex(s[i]%16);
++		g[i*3 + 2] = ' ';
++	}
++	g[i*3] = '\0';
++
++	return g;
++}
++#endif // DEBUG
++
++/* Use instead of regcomp.  As we expect to be seeing the same regexps over and
++over again, it make sense to cache the results. */
++static regexp * compile_and_cache(char * regex_string, char * protocol)
++{
++	struct pattern_cache * node               = first_pattern_cache;
++	struct pattern_cache * last_pattern_cache = first_pattern_cache;
++	struct pattern_cache * tmp;
++	unsigned int len;
++
++	while (node != NULL) {
++		if (!strcmp(node->regex_string, regex_string))
++		return node->pattern;
++
++		last_pattern_cache = node;/* points at the last non-NULL node */
++		node = node->next;
++	}
++
++	/* If we reach the end of the list, then we have not yet cached
++	   the pattern for this regex. Let's do that now.
++	   Be paranoid about running out of memory to avoid list corruption. */
++	tmp = kmalloc(sizeof(struct pattern_cache), GFP_ATOMIC);
++
++	if(!tmp) {
++		if (net_ratelimit())
++			printk(KERN_ERR "layer7: out of memory in compile_and_cache, bailing.\n");
++		return NULL;
++	}
++
++	tmp->regex_string  = kmalloc(strlen(regex_string) + 1, GFP_ATOMIC);
++	tmp->pattern       = kmalloc(sizeof(struct regexp),    GFP_ATOMIC);
++	tmp->next = NULL;
++
++	if(!tmp->regex_string || !tmp->pattern) {
++		if (net_ratelimit())
++			printk(KERN_ERR "layer7: out of memory in compile_and_cache, bailing.\n");
++		kfree(tmp->regex_string);
++		kfree(tmp->pattern);
++		kfree(tmp);
++		return NULL;
++	}
++
++	/* Ok.  The new node is all ready now. */
++	node = tmp;
++
++	if(first_pattern_cache == NULL) /* list is empty */
++		first_pattern_cache = node; /* make node the beginning */
++	else
++		last_pattern_cache->next = node; /* attach node to the end */
++
++	/* copy the string and compile the regex */
++	len = strlen(regex_string);
++	DPRINTK("About to compile this: \"%s\"\n", regex_string);
++	node->pattern = regcomp(regex_string, &len);
++	if ( !node->pattern ) {
++		if (net_ratelimit())
++			printk(KERN_ERR "layer7: Error compiling regexp \"%s\" (%s)\n", regex_string, protocol);
++		/* pattern is now cached as NULL, so we won't try again. */
++	}
++
++	strcpy(node->regex_string, regex_string);
++	return node->pattern;
++}
++
++static int can_handle(const struct sk_buff *skb)
++{
++	if(!ip_hdr(skb)) /* not IP */
++		return 0;
++	if(ip_hdr(skb)->protocol != IPPROTO_TCP &&
++	   ip_hdr(skb)->protocol != IPPROTO_UDP &&
++	   ip_hdr(skb)->protocol != IPPROTO_ICMP)
++		return 0;
++	return 1;
++}
++
++/* Returns offset the into the skb->data that the application data starts */
++static int app_data_offset(const struct sk_buff *skb)
++{
++	/* In case we are ported somewhere (ebtables?) where ip_hdr(skb)
++	isn't set, this can be gotten from 4*(skb->data[0] & 0x0f) as well. */
++	int ip_hl = ip_hdrlen(skb);
++
++	if( ip_hdr(skb)->protocol == IPPROTO_TCP ) {
++		/* 12 == offset into TCP header for the header length field.
++		Can't get this with skb->h.th->doff because the tcphdr
++		struct doesn't get set when routing (this is confirmed to be
++		true in Netfilter as well as QoS.) */
++		int tcp_hl = 4*(skb->data[ip_hl + 12] >> 4);
++
++		return ip_hl + tcp_hl;
++	} else if( ip_hdr(skb)->protocol == IPPROTO_UDP  ) {
++		return ip_hl + 8; /* UDP header is always 8 bytes */
++	} else if( ip_hdr(skb)->protocol == IPPROTO_ICMP ) {
++		return ip_hl + 8; /* ICMP header is 8 bytes */
++	} else {
++		if (net_ratelimit())
++			printk(KERN_ERR "layer7: tried to handle unknown protocol!\n");
++		return ip_hl + 8; /* something reasonable */
++	}
++}
++
++/* handles whether there's a match when we aren't appending data anymore */
++static int match_no_append(struct ip_conntrack * conntrack, struct ip_conntrack * master_conntrack,
++			enum ip_conntrack_info ctinfo, enum ip_conntrack_info master_ctinfo,
++			struct ipt_layer7_info * info)
++{
++	/* If we're in here, throw the app data away */
++	write_lock(&ct_lock);
++	if(master_conntrack->layer7.app_data != NULL) {
++
++	#ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
++		if(!master_conntrack->layer7.app_proto) {
++			char * f = friendly_print(master_conntrack->layer7.app_data);
++			char * g = hex_print(master_conntrack->layer7.app_data);
++			DPRINTK("\nl7-filter gave up after %d bytes (%d packets):\n%s\n",
++				strlen(f), TOTAL_PACKETS, f);
++			kfree(f);
++			DPRINTK("In hex: %s\n", g);
++			kfree(g);
++		}
++	#endif
++
++		kfree(master_conntrack->layer7.app_data);
++		master_conntrack->layer7.app_data = NULL; /* don't free again */
++	}
++	write_unlock(&ct_lock);
++
++	if(master_conntrack->layer7.app_proto){
++		/* Here child connections set their .app_proto (for /proc/net/ip_conntrack) */
++		write_lock(&ct_lock);
++		if(!conntrack->layer7.app_proto) {
++			conntrack->layer7.app_proto = kmalloc(strlen(master_conntrack->layer7.app_proto)+1, GFP_ATOMIC);
++			if(!conntrack->layer7.app_proto){
++				if (net_ratelimit())
++					printk(KERN_ERR "layer7: out of memory in match_no_append, bailing.\n");
++				write_unlock(&ct_lock);
++				return 1;
++			}
++			strcpy(conntrack->layer7.app_proto, master_conntrack->layer7.app_proto);
++		}
++		write_unlock(&ct_lock);
++
++		return (!strcmp(master_conntrack->layer7.app_proto, info->protocol));
++	}
++	else {
++		/* If not classified, set to "unknown" to distinguish from
++		connections that are still being tested. */
++		write_lock(&ct_lock);
++		master_conntrack->layer7.app_proto = kmalloc(strlen("unknown")+1, GFP_ATOMIC);
++		if(!master_conntrack->layer7.app_proto){
++			if (net_ratelimit())
++				printk(KERN_ERR "layer7: out of memory in match_no_append, bailing.\n");
++			write_unlock(&ct_lock);
++			return 1;
++		}
++		strcpy(master_conntrack->layer7.app_proto, "unknown");
++		write_unlock(&ct_lock);
++		return 0;
++	}
++}
++
++/* add the new app data to the conntrack.  Return number of bytes added. */
++static int add_data(struct ip_conntrack * master_conntrack,
++			char * app_data, int appdatalen)
++{
++	int length = 0, i;
++	int oldlength = master_conntrack->layer7.app_data_len;
++
++	// This is a fix for a race condition by Deti Fliegl. However, I'm not 
++	// clear on whether the race condition exists or whether this really 
++	// fixes it.  I might just be being dense... Anyway, if it's not really 
++	// a fix, all it does is waste a very small amount of time.
++	if(!master_conntrack->layer7.app_data) return 0;
++
++	/* Strip nulls. Make everything lower case (our regex lib doesn't
++	do case insensitivity).  Add it to the end of the current data. */
++	for(i = 0; i < maxdatalen-oldlength-1 &&
++		   i < appdatalen; i++) {
++		if(app_data[i] != '\0') {
++			master_conntrack->layer7.app_data[length+oldlength] =
++				/* the kernel version of tolower mungs 'upper ascii' */
++				isascii(app_data[i])? tolower(app_data[i]) : app_data[i];
++			length++;
++		}
++	}
++
++	master_conntrack->layer7.app_data[length+oldlength] = '\0';
++	master_conntrack->layer7.app_data_len = length + oldlength;
++
++	return length;
++}
++
++/* Returns true on match and false otherwise.  */
++static int match(const struct sk_buff *skbin,
++	const struct net_device *in, const struct net_device *out,
++	const struct xt_match *match, const void *matchinfo,
++	int offset, unsigned int protoff, int *hotdrop)
++{
++	/* sidestep const without getting a compiler warning... */
++	struct sk_buff * skb = (struct sk_buff *)skbin; 
++
++	struct ipt_layer7_info * info = (struct ipt_layer7_info *)matchinfo;
++	enum ip_conntrack_info master_ctinfo, ctinfo;
++	struct ip_conntrack *master_conntrack, *conntrack;
++	unsigned char * app_data;
++	unsigned int pattern_result, appdatalen;
++	regexp * comppattern;
++
++	if(!can_handle(skb)){
++		DPRINTK("layer7: This is some protocol I can't handle.\n");
++		return info->invert;
++	}
++
++	/* Treat parent & all its children together as one connection, except
++	for the purpose of setting conntrack->layer7.app_proto in the actual
++	connection. This makes /proc/net/ip_conntrack more satisfying. */
++	if(!(conntrack = ip_conntrack_get((struct sk_buff *)skb, &ctinfo)) ||
++	   !(master_conntrack = ip_conntrack_get((struct sk_buff *)skb, &master_ctinfo))) {
++		//DPRINTK("layer7: packet is not from a known connection, giving up.\n");
++		return info->invert;
++	}
++
++	/* Try to get a master conntrack (and its master etc) for FTP, etc. */
++	while (master_ct(master_conntrack) != NULL)
++		master_conntrack = master_ct(master_conntrack);
++
++	/* if we've classified it or seen too many packets */
++	if(TOTAL_PACKETS > num_packets ||
++	   master_conntrack->layer7.app_proto) {
++
++		pattern_result = match_no_append(conntrack, master_conntrack, ctinfo, master_ctinfo, info);
++
++		/* skb->cb[0] == seen. Avoid doing things twice if there are two l7
++		rules. I'm not sure that using cb for this purpose is correct, although
++		it says "put your private variables there". But it doesn't look like it
++		is being used for anything else in the skbs that make it here. How can
++		I write to cb without making the compiler angry? */
++		skb->cb[0] = 1; /* marking it seen here is probably irrelevant, but consistent */
++
++		return (pattern_result ^ info->invert);
++	}
++
++	if(skb_is_nonlinear(skb)){
++		if(skb_linearize(skb) != 0){
++			if (net_ratelimit())
++				printk(KERN_ERR "layer7: failed to linearize packet, bailing.\n");
++			return info->invert;
++		}
++	}
++
++	/* now that the skb is linearized, it's safe to set these. */
++	app_data = skb->data + app_data_offset(skb);
++	appdatalen = skb->tail - app_data;
++
++	spin_lock_bh(&list_lock);
++	/* the return value gets checked later, when we're ready to use it */
++	comppattern = compile_and_cache(info->pattern, info->protocol);
++	spin_unlock_bh(&list_lock);
++
++	/* On the first packet of a connection, allocate space for app data */
++	write_lock(&ct_lock);
++	if(TOTAL_PACKETS == 1 && !skb->cb[0] && !master_conntrack->layer7.app_data) {
++		master_conntrack->layer7.app_data = kmalloc(maxdatalen, GFP_ATOMIC);
++		if(!master_conntrack->layer7.app_data){
++			if (net_ratelimit())
++				printk(KERN_ERR "layer7: out of memory in match, bailing.\n");
++			write_unlock(&ct_lock);
++			return info->invert;
++		}
++
++		master_conntrack->layer7.app_data[0] = '\0';
++	}
++	write_unlock(&ct_lock);
++
++	/* Can be here, but unallocated, if numpackets is increased near
++	the beginning of a connection */
++	if(master_conntrack->layer7.app_data == NULL)
++		return (info->invert); /* unmatched */
++
++	if(!skb->cb[0]){
++		int newbytes;
++		write_lock(&ct_lock);
++		newbytes = add_data(master_conntrack, app_data, appdatalen);
++		write_unlock(&ct_lock);
++
++		if(newbytes == 0) { /* didn't add any data */
++			skb->cb[0] = 1;
++			/* Didn't match before, not going to match now */
++			return info->invert;
++		}
++	}
++
++	/* If looking for "unknown", then never match.  "Unknown" means that
++	we've given up; we're still trying with these packets. */
++	read_lock(&ct_lock);
++	if(!strcmp(info->protocol, "unknown")) {
++		pattern_result = 0;
++	/* If the regexp failed to compile, don't bother running it */
++	} else if(comppattern && regexec(comppattern, master_conntrack->layer7.app_data)) {
++		DPRINTK("layer7: matched %s\n", info->protocol);
++		pattern_result = 1;
++	} else pattern_result = 0;
++	read_unlock(&ct_lock);
++
++	if(pattern_result) {
++		write_lock(&ct_lock);
++		master_conntrack->layer7.app_proto = kmalloc(strlen(info->protocol)+1, GFP_ATOMIC);
++		if(!master_conntrack->layer7.app_proto){
++			if (net_ratelimit())
++				printk(KERN_ERR "layer7: out of memory in match, bailing.\n");
++			write_unlock(&ct_lock);
++			return (pattern_result ^ info->invert);
++		}
++		strcpy(master_conntrack->layer7.app_proto, info->protocol);
++		write_unlock(&ct_lock);
++	}
++
++	/* mark the packet seen */
++	skb->cb[0] = 1;
++
++	return (pattern_result ^ info->invert);
++}
++
++static struct ipt_match layer7_match = {
++	.name = "layer7",
++	.match = &match,
++	.matchsize  = sizeof(struct ipt_layer7_info),
++	.me = THIS_MODULE
++};
++
++/* taken from drivers/video/modedb.c */
++static int my_atoi(const char *s)
++{
++	int val = 0;
++
++	for (;; s++) {
++		switch (*s) {
++			case '0'...'9':
++			val = 10*val+(*s-'0');
++			break;
++		default:
++			return val;
++		}
++	}
++}
++
++/* write out num_packets to userland. */
++static int layer7_read_proc(char* page, char ** start, off_t off, int count,
++		     int* eof, void * data)
++{
++	if(num_packets > 99 && net_ratelimit())
++		printk(KERN_ERR "layer7: NOT REACHED. num_packets too big\n");
++
++	page[0] = num_packets/10 + '0';
++	page[1] = num_packets%10 + '0';
++	page[2] = '\n';
++	page[3] = '\0';
++
++	*eof=1;
++
++	return 3;
++}
++
++/* Read in num_packets from userland */
++static int layer7_write_proc(struct file* file, const char* buffer,
++		      unsigned long count, void *data)
++{
++	char * foo = kmalloc(count, GFP_ATOMIC);
++
++	if(!foo){
++		if (net_ratelimit())
++			printk(KERN_ERR "layer7: out of memory, bailing. num_packets unchanged.\n");
++		return count;
++	}
++
++	if(copy_from_user(foo, buffer, count)) {
++		return -EFAULT;
++	}
++
++
++	num_packets = my_atoi(foo);
++	kfree (foo);
++
++	/* This has an arbitrary limit to make the math easier. I'm lazy.
++	But anyway, 99 is a LOT! If you want more, you're doing it wrong! */
++	if(num_packets > 99) {
++		printk(KERN_WARNING "layer7: num_packets can't be > 99.\n");
++		num_packets = 99;
++	} else if(num_packets < 1) {
++		printk(KERN_WARNING "layer7: num_packets can't be < 1.\n");
++		num_packets = 1;
++	}
++
++	return count;
++}
++
++/* register the proc file */
++static void layer7_init_proc(void)
++{
++	struct proc_dir_entry* entry;
++	entry = create_proc_entry("layer7_numpackets", 0644, proc_net);
++	entry->read_proc = layer7_read_proc;
++	entry->write_proc = layer7_write_proc;
++}
++
++static void layer7_cleanup_proc(void)
++{
++	remove_proc_entry("layer7_numpackets", proc_net);
++}
++
++static int __init ipt_layer7_init(void)
++{
++	need_conntrack();
++
++	layer7_init_proc();
++	if(maxdatalen < 1) {
++		printk(KERN_WARNING "layer7: maxdatalen can't be < 1, using 1\n");
++		maxdatalen = 1;
++	}
++	/* This is not a hard limit.  It's just here to prevent people from
++	bringing their slow machines to a grinding halt. */
++	else if(maxdatalen > 65536) {
++		printk(KERN_WARNING "layer7: maxdatalen can't be > 65536, using 65536\n");
++		maxdatalen = 65536;
++	}
++	return ipt_register_match(&layer7_match);
++}
++
++static void __exit ipt_layer7_fini(void)
++{
++	layer7_cleanup_proc();
++	ipt_unregister_match(&layer7_match);
++}
++
++module_init(ipt_layer7_init);
++module_exit(ipt_layer7_fini);
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/Kconfig linux-2.6.21.1.dev/net/ipv4/netfilter/Kconfig
+--- linux-2.6.21.1.old/net/ipv4/netfilter/Kconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/Kconfig	2007-05-26 20:13:52.650129816 +0200
+@@ -63,6 +63,24 @@
+ 
+ 	  To compile it as a module, choose M here.  If unsure, say N.
+ 
++config IP_NF_MATCH_LAYER7
++	tristate "Layer 7 match support (EXPERIMENTAL)"
++	depends on IP_NF_IPTABLES && IP_NF_CT_ACCT && IP_NF_CONNTRACK && EXPERIMENTAL
++	help
++	  Say Y if you want to be able to classify connections (and their
++	  packets) based on regular expression matching of their application
++	  layer data.   This is one way to classify applications such as
++	  peer-to-peer filesharing systems that do not always use the same
++	  port.
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
++config IP_NF_MATCH_LAYER7_DEBUG
++	bool "Layer 7 debugging output"
++	depends on IP_NF_MATCH_LAYER7
++	help
++	  Say Y to get lots of debugging output.
++
+ config IP_NF_MATCH_TOS
+ 	tristate "TOS match support"
+ 	depends on IP_NF_IPTABLES
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/Makefile linux-2.6.21.1.dev/net/ipv4/netfilter/Makefile
+--- linux-2.6.21.1.old/net/ipv4/netfilter/Makefile	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/Makefile	2007-05-26 20:13:52.651129664 +0200
+@@ -50,6 +50,8 @@
+ obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
+ obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
+ 
++obj-$(CONFIG_IP_NF_MATCH_LAYER7) += ipt_layer7.o
++
+ # targets
+ obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
+ obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regexp.c linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regexp.c
+--- linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regexp.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regexp.c	2007-05-26 20:13:52.652129512 +0200
+@@ -0,0 +1,1197 @@
++/*
++ * regcomp and regexec -- regsub and regerror are elsewhere
++ * @(#)regexp.c	1.3 of 18 April 87
++ *
++ *	Copyright (c) 1986 by University of Toronto.
++ *	Written by Henry Spencer.  Not derived from licensed software.
++ *
++ *	Permission is granted to anyone to use this software for any
++ *	purpose on any computer system, and to redistribute it freely,
++ *	subject to the following restrictions:
++ *
++ *	1. The author is not responsible for the consequences of use of
++ *		this software, no matter how awful, even if they arise
++ *		from defects in it.
++ *
++ *	2. The origin of this software must not be misrepresented, either
++ *		by explicit claim or by omission.
++ *
++ *	3. Altered versions must be plainly marked as such, and must not
++ *		be misrepresented as being the original software.
++ *
++ * Beware that some of this code is subtly aware of the way operator
++ * precedence is structured in regular expressions.  Serious changes in
++ * regular-expression syntax might require a total rethink.
++ *
++ * This code was modified by Ethan Sommer to work within the kernel
++ * (it now uses kmalloc etc..)
++ *
++ * Modified slightly by Matthew Strait to use more modern C.
++ */
++
++#include "regexp.h"
++#include "regmagic.h"
++
++/* added by ethan and matt.  Lets it work in both kernel and user space.
++(So iptables can use it, for instance.)  Yea, it goes both ways... */
++#if __KERNEL__
++  #define malloc(foo) kmalloc(foo,GFP_ATOMIC)
++#else
++  #define printk(format,args...) printf(format,##args)
++#endif
++
++void regerror(char * s)
++{
++        printk("<3>Regexp: %s\n", s);
++        /* NOTREACHED */
++}
++
++/*
++ * The "internal use only" fields in regexp.h are present to pass info from
++ * compile to execute that permits the execute phase to run lots faster on
++ * simple cases.  They are:
++ *
++ * regstart	char that must begin a match; '\0' if none obvious
++ * reganch	is the match anchored (at beginning-of-line only)?
++ * regmust	string (pointer into program) that match must include, or NULL
++ * regmlen	length of regmust string
++ *
++ * Regstart and reganch permit very fast decisions on suitable starting points
++ * for a match, cutting down the work a lot.  Regmust permits fast rejection
++ * of lines that cannot possibly match.  The regmust tests are costly enough
++ * that regcomp() supplies a regmust only if the r.e. contains something
++ * potentially expensive (at present, the only such thing detected is * or +
++ * at the start of the r.e., which can involve a lot of backup).  Regmlen is
++ * supplied because the test in regexec() needs it and regcomp() is computing
++ * it anyway.
++ */
++
++/*
++ * Structure for regexp "program".  This is essentially a linear encoding
++ * of a nondeterministic finite-state machine (aka syntax charts or
++ * "railroad normal form" in parsing technology).  Each node is an opcode
++ * plus a "next" pointer, possibly plus an operand.  "Next" pointers of
++ * all nodes except BRANCH implement concatenation; a "next" pointer with
++ * a BRANCH on both ends of it is connecting two alternatives.  (Here we
++ * have one of the subtle syntax dependencies:  an individual BRANCH (as
++ * opposed to a collection of them) is never concatenated with anything
++ * because of operator precedence.)  The operand of some types of node is
++ * a literal string; for others, it is a node leading into a sub-FSM.  In
++ * particular, the operand of a BRANCH node is the first node of the branch.
++ * (NB this is *not* a tree structure:  the tail of the branch connects
++ * to the thing following the set of BRANCHes.)  The opcodes are:
++ */
++
++/* definition	number	opnd?	meaning */
++#define	END	0	/* no	End of program. */
++#define	BOL	1	/* no	Match "" at beginning of line. */
++#define	EOL	2	/* no	Match "" at end of line. */
++#define	ANY	3	/* no	Match any one character. */
++#define	ANYOF	4	/* str	Match any character in this string. */
++#define	ANYBUT	5	/* str	Match any character not in this string. */
++#define	BRANCH	6	/* node	Match this alternative, or the next... */
++#define	BACK	7	/* no	Match "", "next" ptr points backward. */
++#define	EXACTLY	8	/* str	Match this string. */
++#define	NOTHING	9	/* no	Match empty string. */
++#define	STAR	10	/* node	Match this (simple) thing 0 or more times. */
++#define	PLUS	11	/* node	Match this (simple) thing 1 or more times. */
++#define	OPEN	20	/* no	Mark this point in input as start of #n. */
++			/*	OPEN+1 is number 1, etc. */
++#define	CLOSE	30	/* no	Analogous to OPEN. */
++
++/*
++ * Opcode notes:
++ *
++ * BRANCH	The set of branches constituting a single choice are hooked
++ *		together with their "next" pointers, since precedence prevents
++ *		anything being concatenated to any individual branch.  The
++ *		"next" pointer of the last BRANCH in a choice points to the
++ *		thing following the whole choice.  This is also where the
++ *		final "next" pointer of each individual branch points; each
++ *		branch starts with the operand node of a BRANCH node.
++ *
++ * BACK		Normal "next" pointers all implicitly point forward; BACK
++ *		exists to make loop structures possible.
++ *
++ * STAR,PLUS	'?', and complex '*' and '+', are implemented as circular
++ *		BRANCH structures using BACK.  Simple cases (one character
++ *		per match) are implemented with STAR and PLUS for speed
++ *		and to minimize recursive plunges.
++ *
++ * OPEN,CLOSE	...are numbered at compile time.
++ */
++
++/*
++ * A node is one char of opcode followed by two chars of "next" pointer.
++ * "Next" pointers are stored as two 8-bit pieces, high order first.  The
++ * value is a positive offset from the opcode of the node containing it.
++ * An operand, if any, simply follows the node.  (Note that much of the
++ * code generation knows about this implicit relationship.)
++ *
++ * Using two bytes for the "next" pointer is vast overkill for most things,
++ * but allows patterns to get big without disasters.
++ */
++#define	OP(p)	(*(p))
++#define	NEXT(p)	(((*((p)+1)&0377)<<8) + (*((p)+2)&0377))
++#define	OPERAND(p)	((p) + 3)
++
++/*
++ * See regmagic.h for one further detail of program structure.
++ */
++
++
++/*
++ * Utility definitions.
++ */
++#ifndef CHARBITS
++#define	UCHARAT(p)	((int)*(unsigned char *)(p))
++#else
++#define	UCHARAT(p)	((int)*(p)&CHARBITS)
++#endif
++
++#define	FAIL(m)	{ regerror(m); return(NULL); }
++#define	ISMULT(c)	((c) == '*' || (c) == '+' || (c) == '?')
++#define	META	"^$.[()|?+*\\"
++
++/*
++ * Flags to be passed up and down.
++ */
++#define	HASWIDTH	01	/* Known never to match null string. */
++#define	SIMPLE		02	/* Simple enough to be STAR/PLUS operand. */
++#define	SPSTART		04	/* Starts with * or +. */
++#define	WORST		0	/* Worst case. */
++
++/*
++ * Global work variables for regcomp().
++ */
++struct match_globals {
++char *reginput;		/* String-input pointer. */
++char *regbol;		/* Beginning of input, for ^ check. */
++char **regstartp;	/* Pointer to startp array. */
++char **regendp;		/* Ditto for endp. */
++char *regparse;		/* Input-scan pointer. */
++int regnpar;		/* () count. */
++char regdummy;
++char *regcode;		/* Code-emit pointer; &regdummy = don't. */
++long regsize;		/* Code size. */
++};
++
++/*
++ * Forward declarations for regcomp()'s friends.
++ */
++#ifndef STATIC
++#define	STATIC	static
++#endif
++STATIC char *reg(struct match_globals *g, int paren,int *flagp);
++STATIC char *regbranch(struct match_globals *g, int *flagp);
++STATIC char *regpiece(struct match_globals *g, int *flagp);
++STATIC char *regatom(struct match_globals *g, int *flagp);
++STATIC char *regnode(struct match_globals *g, char op);
++STATIC char *regnext(struct match_globals *g, char *p);
++STATIC void regc(struct match_globals *g, char b);
++STATIC void reginsert(struct match_globals *g, char op, char *opnd);
++STATIC void regtail(struct match_globals *g, char *p, char *val);
++STATIC void regoptail(struct match_globals *g, char *p, char *val);
++
++
++__kernel_size_t my_strcspn(const char *s1,const char *s2)
++{
++        char *scan1;
++        char *scan2;
++        int count;
++
++        count = 0;
++        for (scan1 = (char *)s1; *scan1 != '\0'; scan1++) {
++                for (scan2 = (char *)s2; *scan2 != '\0';)       /* ++ moved down. */
++                        if (*scan1 == *scan2++)
++                                return(count);
++                count++;
++        }
++        return(count);
++}
++
++/*
++ - regcomp - compile a regular expression into internal code
++ *
++ * We can't allocate space until we know how big the compiled form will be,
++ * but we can't compile it (and thus know how big it is) until we've got a
++ * place to put the code.  So we cheat:  we compile it twice, once with code
++ * generation turned off and size counting turned on, and once "for real".
++ * This also means that we don't allocate space until we are sure that the
++ * thing really will compile successfully, and we never have to move the
++ * code and thus invalidate pointers into it.  (Note that it has to be in
++ * one piece because free() must be able to free it all.)
++ *
++ * Beware that the optimization-preparation code in here knows about some
++ * of the structure of the compiled regexp.
++ */
++regexp *
++regcomp(char *exp,int *patternsize)
++{
++	register regexp *r;
++	register char *scan;
++	register char *longest;
++	register int len;
++	int flags;
++	struct match_globals g;
++	
++	/* commented out by ethan
++	   extern char *malloc();
++	*/
++
++	if (exp == NULL)
++		FAIL("NULL argument");
++
++	/* First pass: determine size, legality. */
++	g.regparse = exp;
++	g.regnpar = 1;
++	g.regsize = 0L;
++	g.regcode = &g.regdummy;
++	regc(&g, MAGIC);
++	if (reg(&g, 0, &flags) == NULL)
++		return(NULL);
++
++	/* Small enough for pointer-storage convention? */
++	if (g.regsize >= 32767L)		/* Probably could be 65535L. */
++		FAIL("regexp too big");
++
++	/* Allocate space. */
++	*patternsize=sizeof(regexp) + (unsigned)g.regsize;
++	r = (regexp *)malloc(sizeof(regexp) + (unsigned)g.regsize);
++	if (r == NULL)
++		FAIL("out of space");
++
++	/* Second pass: emit code. */
++	g.regparse = exp;
++	g.regnpar = 1;
++	g.regcode = r->program;
++	regc(&g, MAGIC);
++	if (reg(&g, 0, &flags) == NULL)
++		return(NULL);
++
++	/* Dig out information for optimizations. */
++	r->regstart = '\0';	/* Worst-case defaults. */
++	r->reganch = 0;
++	r->regmust = NULL;
++	r->regmlen = 0;
++	scan = r->program+1;			/* First BRANCH. */
++	if (OP(regnext(&g, scan)) == END) {		/* Only one top-level choice. */
++		scan = OPERAND(scan);
++
++		/* Starting-point info. */
++		if (OP(scan) == EXACTLY)
++			r->regstart = *OPERAND(scan);
++		else if (OP(scan) == BOL)
++			r->reganch++;
++
++		/*
++		 * If there's something expensive in the r.e., find the
++		 * longest literal string that must appear and make it the
++		 * regmust.  Resolve ties in favor of later strings, since
++		 * the regstart check works with the beginning of the r.e.
++		 * and avoiding duplication strengthens checking.  Not a
++		 * strong reason, but sufficient in the absence of others.
++		 */
++		if (flags&SPSTART) {
++			longest = NULL;
++			len = 0;
++			for (; scan != NULL; scan = regnext(&g, scan))
++				if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) {
++					longest = OPERAND(scan);
++					len = strlen(OPERAND(scan));
++				}
++			r->regmust = longest;
++			r->regmlen = len;
++		}
++	}
++
++	return(r);
++}
++
++/*
++ - reg - regular expression, i.e. main body or parenthesized thing
++ *
++ * Caller must absorb opening parenthesis.
++ *
++ * Combining parenthesis handling with the base level of regular expression
++ * is a trifle forced, but the need to tie the tails of the branches to what
++ * follows makes it hard to avoid.
++ */
++static char *
++reg(struct match_globals *g, int paren, int *flagp /* Parenthesized? */ )
++{
++	register char *ret;
++	register char *br;
++	register char *ender;
++	register int parno = 0; /* 0 makes gcc happy */
++	int flags;
++
++	*flagp = HASWIDTH;	/* Tentatively. */
++
++	/* Make an OPEN node, if parenthesized. */
++	if (paren) {
++		if (g->regnpar >= NSUBEXP)
++			FAIL("too many ()");
++		parno = g->regnpar;
++		g->regnpar++;
++		ret = regnode(g, OPEN+parno);
++	} else
++		ret = NULL;
++
++	/* Pick up the branches, linking them together. */
++	br = regbranch(g, &flags);
++	if (br == NULL)
++		return(NULL);
++	if (ret != NULL)
++		regtail(g, ret, br);	/* OPEN -> first. */
++	else
++		ret = br;
++	if (!(flags&HASWIDTH))
++		*flagp &= ~HASWIDTH;
++	*flagp |= flags&SPSTART;
++	while (*g->regparse == '|') {
++		g->regparse++;
++		br = regbranch(g, &flags);
++		if (br == NULL)
++			return(NULL);
++		regtail(g, ret, br);	/* BRANCH -> BRANCH. */
++		if (!(flags&HASWIDTH))
++			*flagp &= ~HASWIDTH;
++		*flagp |= flags&SPSTART;
++	}
++
++	/* Make a closing node, and hook it on the end. */
++	ender = regnode(g, (paren) ? CLOSE+parno : END);	
++	regtail(g, ret, ender);
++
++	/* Hook the tails of the branches to the closing node. */
++	for (br = ret; br != NULL; br = regnext(g, br))
++		regoptail(g, br, ender);
++
++	/* Check for proper termination. */
++	if (paren && *g->regparse++ != ')') {
++		FAIL("unmatched ()");
++	} else if (!paren && *g->regparse != '\0') {
++		if (*g->regparse == ')') {
++			FAIL("unmatched ()");
++		} else
++			FAIL("junk on end");	/* "Can't happen". */
++		/* NOTREACHED */
++	}
++
++	return(ret);
++}
++
++/*
++ - regbranch - one alternative of an | operator
++ *
++ * Implements the concatenation operator.
++ */
++static char *
++regbranch(struct match_globals *g, int *flagp)
++{
++	register char *ret;
++	register char *chain;
++	register char *latest;
++	int flags;
++
++	*flagp = WORST;		/* Tentatively. */
++
++	ret = regnode(g, BRANCH);
++	chain = NULL;
++	while (*g->regparse != '\0' && *g->regparse != '|' && *g->regparse != ')') {
++		latest = regpiece(g, &flags);
++		if (latest == NULL)
++			return(NULL);
++		*flagp |= flags&HASWIDTH;
++		if (chain == NULL)	/* First piece. */
++			*flagp |= flags&SPSTART;
++		else
++			regtail(g, chain, latest);
++		chain = latest;
++	}
++	if (chain == NULL)	/* Loop ran zero times. */
++		(void) regnode(g, NOTHING);
++
++	return(ret);
++}
++
++/*
++ - regpiece - something followed by possible [*+?]
++ *
++ * Note that the branching code sequences used for ? and the general cases
++ * of * and + are somewhat optimized:  they use the same NOTHING node as
++ * both the endmarker for their branch list and the body of the last branch.
++ * It might seem that this node could be dispensed with entirely, but the
++ * endmarker role is not redundant.
++ */
++static char *
++regpiece(struct match_globals *g, int *flagp)
++{
++	register char *ret;
++	register char op;
++	register char *next;
++	int flags;
++
++	ret = regatom(g, &flags);
++	if (ret == NULL)
++		return(NULL);
++
++	op = *g->regparse;
++	if (!ISMULT(op)) {
++		*flagp = flags;
++		return(ret);
++	}
++
++	if (!(flags&HASWIDTH) && op != '?')
++		FAIL("*+ operand could be empty");
++	*flagp = (op != '+') ? (WORST|SPSTART) : (WORST|HASWIDTH);
++
++	if (op == '*' && (flags&SIMPLE))
++		reginsert(g, STAR, ret);
++	else if (op == '*') {
++		/* Emit x* as (x&|), where & means "self". */
++		reginsert(g, BRANCH, ret);			/* Either x */
++		regoptail(g, ret, regnode(g, BACK));		/* and loop */
++		regoptail(g, ret, ret);			/* back */
++		regtail(g, ret, regnode(g, BRANCH));		/* or */
++		regtail(g, ret, regnode(g, NOTHING));		/* null. */
++	} else if (op == '+' && (flags&SIMPLE))
++		reginsert(g, PLUS, ret);
++	else if (op == '+') {
++		/* Emit x+ as x(&|), where & means "self". */
++		next = regnode(g, BRANCH);			/* Either */
++		regtail(g, ret, next);
++		regtail(g, regnode(g, BACK), ret);		/* loop back */
++		regtail(g, next, regnode(g, BRANCH));		/* or */
++		regtail(g, ret, regnode(g, NOTHING));		/* null. */
++	} else if (op == '?') {
++		/* Emit x? as (x|) */
++		reginsert(g, BRANCH, ret);			/* Either x */
++		regtail(g, ret, regnode(g, BRANCH));		/* or */
++		next = regnode(g, NOTHING);		/* null. */
++		regtail(g, ret, next);
++		regoptail(g, ret, next);
++	}
++	g->regparse++;
++	if (ISMULT(*g->regparse))
++		FAIL("nested *?+");
++
++	return(ret);
++}
++
++/*
++ - regatom - the lowest level
++ *
++ * Optimization:  gobbles an entire sequence of ordinary characters so that
++ * it can turn them into a single node, which is smaller to store and
++ * faster to run.  Backslashed characters are exceptions, each becoming a
++ * separate node; the code is simpler that way and it's not worth fixing.
++ */
++static char *
++regatom(struct match_globals *g, int *flagp)
++{
++	register char *ret;
++	int flags;
++
++	*flagp = WORST;		/* Tentatively. */
++
++	switch (*g->regparse++) {
++	case '^':
++		ret = regnode(g, BOL);
++		break;
++	case '$':
++		ret = regnode(g, EOL);
++		break;
++	case '.':
++		ret = regnode(g, ANY);
++		*flagp |= HASWIDTH|SIMPLE;
++		break;
++	case '[': {
++			register int class;
++			register int classend;
++
++			if (*g->regparse == '^') {	/* Complement of range. */
++				ret = regnode(g, ANYBUT);
++				g->regparse++;
++			} else
++				ret = regnode(g, ANYOF);
++			if (*g->regparse == ']' || *g->regparse == '-')
++				regc(g, *g->regparse++);
++			while (*g->regparse != '\0' && *g->regparse != ']') {
++				if (*g->regparse == '-') {
++					g->regparse++;
++					if (*g->regparse == ']' || *g->regparse == '\0')
++						regc(g, '-');
++					else {
++						class = UCHARAT(g->regparse-2)+1;
++						classend = UCHARAT(g->regparse);
++						if (class > classend+1)
++							FAIL("invalid [] range");
++						for (; class <= classend; class++)
++							regc(g, class);
++						g->regparse++;
++					}
++				} else
++					regc(g, *g->regparse++);
++			}
++			regc(g, '\0');
++			if (*g->regparse != ']')
++				FAIL("unmatched []");
++			g->regparse++;
++			*flagp |= HASWIDTH|SIMPLE;
++		}
++		break;
++	case '(':
++		ret = reg(g, 1, &flags);
++		if (ret == NULL)
++			return(NULL);
++		*flagp |= flags&(HASWIDTH|SPSTART);
++		break;
++	case '\0':
++	case '|':
++	case ')':
++		FAIL("internal urp");	/* Supposed to be caught earlier. */
++		break;
++	case '?':
++	case '+':
++	case '*':
++		FAIL("?+* follows nothing");
++		break;
++	case '\\':
++		if (*g->regparse == '\0')
++			FAIL("trailing \\");
++		ret = regnode(g, EXACTLY);
++		regc(g, *g->regparse++);
++		regc(g, '\0');
++		*flagp |= HASWIDTH|SIMPLE;
++		break;
++	default: {
++			register int len;
++			register char ender;
++
++			g->regparse--;
++			len = my_strcspn((const char *)g->regparse, (const char *)META);
++			if (len <= 0)
++				FAIL("internal disaster");
++			ender = *(g->regparse+len);
++			if (len > 1 && ISMULT(ender))
++				len--;		/* Back off clear of ?+* operand. */
++			*flagp |= HASWIDTH;
++			if (len == 1)
++				*flagp |= SIMPLE;
++			ret = regnode(g, EXACTLY);
++			while (len > 0) {
++				regc(g, *g->regparse++);
++				len--;
++			}
++			regc(g, '\0');
++		}
++		break;
++	}
++
++	return(ret);
++}
++
++/*
++ - regnode - emit a node
++ */
++static char *			/* Location. */
++regnode(struct match_globals *g, char op)
++{
++	register char *ret;
++	register char *ptr;
++
++	ret = g->regcode;
++	if (ret == &g->regdummy) {
++		g->regsize += 3;
++		return(ret);
++	}
++
++	ptr = ret;
++	*ptr++ = op;
++	*ptr++ = '\0';		/* Null "next" pointer. */
++	*ptr++ = '\0';
++	g->regcode = ptr;
++
++	return(ret);
++}
++
++/*
++ - regc - emit (if appropriate) a byte of code
++ */
++static void
++regc(struct match_globals *g, char b)
++{
++	if (g->regcode != &g->regdummy)
++		*g->regcode++ = b;
++	else
++		g->regsize++;
++}
++
++/*
++ - reginsert - insert an operator in front of already-emitted operand
++ *
++ * Means relocating the operand.
++ */
++static void
++reginsert(struct match_globals *g, char op, char* opnd)
++{
++	register char *src;
++	register char *dst;
++	register char *place;
++
++	if (g->regcode == &g->regdummy) {
++		g->regsize += 3;
++		return;
++	}
++
++	src = g->regcode;
++	g->regcode += 3;
++	dst = g->regcode;
++	while (src > opnd)
++		*--dst = *--src;
++
++	place = opnd;		/* Op node, where operand used to be. */
++	*place++ = op;
++	*place++ = '\0';
++	*place++ = '\0';
++}
++
++/*
++ - regtail - set the next-pointer at the end of a node chain
++ */
++static void
++regtail(struct match_globals *g, char *p, char *val)
++{
++	register char *scan;
++	register char *temp;
++	register int offset;
++
++	if (p == &g->regdummy)
++		return;
++
++	/* Find last node. */
++	scan = p;
++	for (;;) {
++		temp = regnext(g, scan);
++		if (temp == NULL)
++			break;
++		scan = temp;
++	}
++
++	if (OP(scan) == BACK)
++		offset = scan - val;
++	else
++		offset = val - scan;
++	*(scan+1) = (offset>>8)&0377;
++	*(scan+2) = offset&0377;
++}
++
++/*
++ - regoptail - regtail on operand of first argument; nop if operandless
++ */
++static void
++regoptail(struct match_globals *g, char *p, char *val)
++{
++	/* "Operandless" and "op != BRANCH" are synonymous in practice. */
++	if (p == NULL || p == &g->regdummy || OP(p) != BRANCH)
++		return;
++	regtail(g, OPERAND(p), val);
++}
++
++/*
++ * regexec and friends
++ */
++
++
++/*
++ * Forwards.
++ */
++STATIC int regtry(struct match_globals *g, regexp *prog, char *string);
++STATIC int regmatch(struct match_globals *g, char *prog);
++STATIC int regrepeat(struct match_globals *g, char *p);
++
++#ifdef DEBUG
++int regnarrate = 0;
++void regdump();
++STATIC char *regprop(char *op);
++#endif
++
++/*
++ - regexec - match a regexp against a string
++ */
++int
++regexec(regexp *prog, char *string)
++{
++	register char *s;
++	struct match_globals g;
++
++	/* Be paranoid... */
++	if (prog == NULL || string == NULL) {
++		printk("<3>Regexp: NULL parameter\n");
++		return(0);
++	}
++
++	/* Check validity of program. */
++	if (UCHARAT(prog->program) != MAGIC) {
++		printk("<3>Regexp: corrupted program\n");
++		return(0);
++	}
++
++	/* If there is a "must appear" string, look for it. */
++	if (prog->regmust != NULL) {
++		s = string;
++		while ((s = strchr(s, prog->regmust[0])) != NULL) {
++			if (strncmp(s, prog->regmust, prog->regmlen) == 0)
++				break;	/* Found it. */
++			s++;
++		}
++		if (s == NULL)	/* Not present. */
++			return(0);
++	}
++
++	/* Mark beginning of line for ^ . */
++	g.regbol = string;
++
++	/* Simplest case:  anchored match need be tried only once. */
++	if (prog->reganch)
++		return(regtry(&g, prog, string));
++
++	/* Messy cases:  unanchored match. */
++	s = string;
++	if (prog->regstart != '\0')
++		/* We know what char it must start with. */
++		while ((s = strchr(s, prog->regstart)) != NULL) {
++			if (regtry(&g, prog, s))
++				return(1);
++			s++;
++		}
++	else
++		/* We don't -- general case. */
++		do {
++			if (regtry(&g, prog, s))
++				return(1);
++		} while (*s++ != '\0');
++
++	/* Failure. */
++	return(0);
++}
++
++/*
++ - regtry - try match at specific point
++ */
++static int			/* 0 failure, 1 success */
++regtry(struct match_globals *g, regexp *prog, char *string)
++{
++	register int i;
++	register char **sp;
++	register char **ep;
++
++	g->reginput = string;
++	g->regstartp = prog->startp;
++	g->regendp = prog->endp;
++
++	sp = prog->startp;
++	ep = prog->endp;
++	for (i = NSUBEXP; i > 0; i--) {
++		*sp++ = NULL;
++		*ep++ = NULL;
++	}
++	if (regmatch(g, prog->program + 1)) {
++		prog->startp[0] = string;
++		prog->endp[0] = g->reginput;
++		return(1);
++	} else
++		return(0);
++}
++
++/*
++ - regmatch - main matching routine
++ *
++ * Conceptually the strategy is simple:  check to see whether the current
++ * node matches, call self recursively to see whether the rest matches,
++ * and then act accordingly.  In practice we make some effort to avoid
++ * recursion, in particular by going through "ordinary" nodes (that don't
++ * need to know whether the rest of the match failed) by a loop instead of
++ * by recursion.
++ */
++static int			/* 0 failure, 1 success */
++regmatch(struct match_globals *g, char *prog)
++{
++	register char *scan = prog; /* Current node. */
++	char *next;		    /* Next node. */
++
++#ifdef DEBUG
++	if (scan != NULL && regnarrate)
++		fprintf(stderr, "%s(\n", regprop(scan));
++#endif
++	while (scan != NULL) {
++#ifdef DEBUG
++		if (regnarrate)
++			fprintf(stderr, "%s...\n", regprop(scan));
++#endif
++		next = regnext(g, scan);
++
++		switch (OP(scan)) {
++		case BOL:
++			if (g->reginput != g->regbol)
++				return(0);
++			break;
++		case EOL:
++			if (*g->reginput != '\0')
++				return(0);
++			break;
++		case ANY:
++			if (*g->reginput == '\0')
++				return(0);
++			g->reginput++;
++			break;
++		case EXACTLY: {
++				register int len;
++				register char *opnd;
++
++				opnd = OPERAND(scan);
++				/* Inline the first character, for speed. */
++				if (*opnd != *g->reginput)
++					return(0);
++				len = strlen(opnd);
++				if (len > 1 && strncmp(opnd, g->reginput, len) != 0)
++					return(0);
++				g->reginput += len;
++			}
++			break;
++		case ANYOF:
++			if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) == NULL)
++				return(0);
++			g->reginput++;
++			break;
++		case ANYBUT:
++			if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) != NULL)
++				return(0);
++			g->reginput++;
++			break;
++		case NOTHING:
++		case BACK:
++			break;
++		case OPEN+1:
++		case OPEN+2:
++		case OPEN+3:
++		case OPEN+4:
++		case OPEN+5:
++		case OPEN+6:
++		case OPEN+7:
++		case OPEN+8:
++		case OPEN+9: {
++				register int no;
++				register char *save;
++
++				no = OP(scan) - OPEN;
++				save = g->reginput;
++
++				if (regmatch(g, next)) {
++					/*
++					 * Don't set startp if some later
++					 * invocation of the same parentheses
++					 * already has.
++					 */
++					if (g->regstartp[no] == NULL)
++						g->regstartp[no] = save;
++					return(1);
++				} else
++					return(0);
++			}
++			break;
++		case CLOSE+1:
++		case CLOSE+2:
++		case CLOSE+3:
++		case CLOSE+4:
++		case CLOSE+5:
++		case CLOSE+6:
++		case CLOSE+7:
++		case CLOSE+8:
++		case CLOSE+9:
++			{
++				register int no;
++				register char *save;
++
++				no = OP(scan) - CLOSE;
++				save = g->reginput;
++
++				if (regmatch(g, next)) {
++					/*
++					 * Don't set endp if some later
++					 * invocation of the same parentheses
++					 * already has.
++					 */
++					if (g->regendp[no] == NULL)
++						g->regendp[no] = save;
++					return(1);
++				} else
++					return(0);
++			}
++			break;
++		case BRANCH: {
++				register char *save;
++
++				if (OP(next) != BRANCH)		/* No choice. */
++					next = OPERAND(scan);	/* Avoid recursion. */
++				else {
++					do {
++						save = g->reginput;
++						if (regmatch(g, OPERAND(scan)))
++							return(1);
++						g->reginput = save;
++						scan = regnext(g, scan);
++					} while (scan != NULL && OP(scan) == BRANCH);
++					return(0);
++					/* NOTREACHED */
++				}
++			}
++			break;
++		case STAR:
++		case PLUS: {
++				register char nextch;
++				register int no;
++				register char *save;
++				register int min;
++
++				/*
++				 * Lookahead to avoid useless match attempts
++				 * when we know what character comes next.
++				 */
++				nextch = '\0';
++				if (OP(next) == EXACTLY)
++					nextch = *OPERAND(next);
++				min = (OP(scan) == STAR) ? 0 : 1;
++				save = g->reginput;
++				no = regrepeat(g, OPERAND(scan));
++				while (no >= min) {
++					/* If it could work, try it. */
++					if (nextch == '\0' || *g->reginput == nextch)
++						if (regmatch(g, next))
++							return(1);
++					/* Couldn't or didn't -- back up. */
++					no--;
++					g->reginput = save + no;
++				}
++				return(0);
++			}
++			break;
++		case END:
++			return(1);	/* Success! */
++			break;
++		default:
++			printk("<3>Regexp: memory corruption\n");
++			return(0);
++			break;
++		}
++
++		scan = next;
++	}
++
++	/*
++	 * We get here only if there's trouble -- normally "case END" is
++	 * the terminating point.
++	 */
++	printk("<3>Regexp: corrupted pointers\n");
++	return(0);
++}
++
++/*
++ - regrepeat - repeatedly match something simple, report how many
++ */
++static int
++regrepeat(struct match_globals *g, char *p)
++{
++	register int count = 0;
++	register char *scan;
++	register char *opnd;
++
++	scan = g->reginput;
++	opnd = OPERAND(p);
++	switch (OP(p)) {
++	case ANY:
++		count = strlen(scan);
++		scan += count;
++		break;
++	case EXACTLY:
++		while (*opnd == *scan) {
++			count++;
++			scan++;
++		}
++		break;
++	case ANYOF:
++		while (*scan != '\0' && strchr(opnd, *scan) != NULL) {
++			count++;
++			scan++;
++		}
++		break;
++	case ANYBUT:
++		while (*scan != '\0' && strchr(opnd, *scan) == NULL) {
++			count++;
++			scan++;
++		}
++		break;
++	default:		/* Oh dear.  Called inappropriately. */
++		printk("<3>Regexp: internal foulup\n");
++		count = 0;	/* Best compromise. */
++		break;
++	}
++	g->reginput = scan;
++
++	return(count);
++}
++
++/*
++ - regnext - dig the "next" pointer out of a node
++ */
++static char*
++regnext(struct match_globals *g, char *p)
++{
++	register int offset;
++
++	if (p == &g->regdummy)
++		return(NULL);
++
++	offset = NEXT(p);
++	if (offset == 0)
++		return(NULL);
++
++	if (OP(p) == BACK)
++		return(p-offset);
++	else
++		return(p+offset);
++}
++
++#ifdef DEBUG
++
++STATIC char *regprop();
++
++/*
++ - regdump - dump a regexp onto stdout in vaguely comprehensible form
++ */
++void
++regdump(regexp *r)
++{
++	register char *s;
++	register char op = EXACTLY;	/* Arbitrary non-END op. */
++	register char *next;
++	/* extern char *strchr(); */
++
++
++	s = r->program + 1;
++	while (op != END) {	/* While that wasn't END last time... */
++		op = OP(s);
++		printf("%2d%s", s-r->program, regprop(s));	/* Where, what. */
++		next = regnext(s);
++		if (next == NULL)		/* Next ptr. */
++			printf("(0)");
++		else
++			printf("(%d)", (s-r->program)+(next-s));
++		s += 3;
++		if (op == ANYOF || op == ANYBUT || op == EXACTLY) {
++			/* Literal string, where present. */
++			while (*s != '\0') {
++				putchar(*s);
++				s++;
++			}
++			s++;
++		}
++		putchar('\n');
++	}
++
++	/* Header fields of interest. */
++	if (r->regstart != '\0')
++		printf("start `%c' ", r->regstart);
++	if (r->reganch)
++		printf("anchored ");
++	if (r->regmust != NULL)
++		printf("must have \"%s\"", r->regmust);
++	printf("\n");
++}
++
++/*
++ - regprop - printable representation of opcode
++ */
++static char *
++regprop(char *op)
++{
++#define BUFLEN 50
++	register char *p;
++	static char buf[BUFLEN];
++
++	strcpy(buf, ":");
++
++	switch (OP(op)) {
++	case BOL:
++		p = "BOL";
++		break;
++	case EOL:
++		p = "EOL";
++		break;
++	case ANY:
++		p = "ANY";
++		break;
++	case ANYOF:
++		p = "ANYOF";
++		break;
++	case ANYBUT:
++		p = "ANYBUT";
++		break;
++	case BRANCH:
++		p = "BRANCH";
++		break;
++	case EXACTLY:
++		p = "EXACTLY";
++		break;
++	case NOTHING:
++		p = "NOTHING";
++		break;
++	case BACK:
++		p = "BACK";
++		break;
++	case END:
++		p = "END";
++		break;
++	case OPEN+1:
++	case OPEN+2:
++	case OPEN+3:
++	case OPEN+4:
++	case OPEN+5:
++	case OPEN+6:
++	case OPEN+7:
++	case OPEN+8:
++	case OPEN+9:
++		snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "OPEN%d", OP(op)-OPEN);
++		p = NULL;
++		break;
++	case CLOSE+1:
++	case CLOSE+2:
++	case CLOSE+3:
++	case CLOSE+4:
++	case CLOSE+5:
++	case CLOSE+6:
++	case CLOSE+7:
++	case CLOSE+8:
++	case CLOSE+9:
++		snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "CLOSE%d", OP(op)-CLOSE);
++		p = NULL;
++		break;
++	case STAR:
++		p = "STAR";
++		break;
++	case PLUS:
++		p = "PLUS";
++		break;
++	default:
++		printk("<3>Regexp: corrupted opcode\n");
++		break;
++	}
++	if (p != NULL)
++		strncat(buf, p, BUFLEN-strlen(buf));
++	return(buf);
++}
++#endif
++
++
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regexp.h linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regexp.h
+--- linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regexp.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regexp.h	2007-05-26 20:13:52.652129512 +0200
+@@ -0,0 +1,41 @@
++/*
++ * Definitions etc. for regexp(3) routines.
++ *
++ * Caveat:  this is V8 regexp(3) [actually, a reimplementation thereof],
++ * not the System V one.
++ */
++
++#ifndef REGEXP_H
++#define REGEXP_H
++
++
++/*
++http://www.opensource.apple.com/darwinsource/10.3/expect-1/expect/expect.h ,
++which contains a version of this library, says:
++
++ *
++ * NSUBEXP must be at least 10, and no greater than 117 or the parser
++ * will not work properly.
++ *
++
++However, it looks rather like this library is limited to 10.  If you think
++otherwise, let us know.
++*/
++
++#define NSUBEXP  10
++typedef struct regexp {
++	char *startp[NSUBEXP];
++	char *endp[NSUBEXP];
++	char regstart;		/* Internal use only. */
++	char reganch;		/* Internal use only. */
++	char *regmust;		/* Internal use only. */
++	int regmlen;		/* Internal use only. */
++	char program[1];	/* Unwarranted chumminess with compiler. */
++} regexp;
++
++regexp * regcomp(char *exp, int *patternsize);
++int regexec(regexp *prog, char *string);
++void regsub(regexp *prog, char *source, char *dest);
++void regerror(char *s);
++
++#endif
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regmagic.h linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regmagic.h
+--- linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regmagic.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regmagic.h	2007-05-26 20:13:52.652129512 +0200
+@@ -0,0 +1,5 @@
++/*
++ * The first byte of the regexp internal "program" is actually this magic
++ * number; the start node begins in the second byte.
++ */
++#define	MAGIC	0234
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regsub.c linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regsub.c
+--- linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regsub.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regsub.c	2007-05-26 20:13:52.653129360 +0200
+@@ -0,0 +1,95 @@
++/*
++ * regsub
++ * @(#)regsub.c	1.3 of 2 April 86
++ *
++ *	Copyright (c) 1986 by University of Toronto.
++ *	Written by Henry Spencer.  Not derived from licensed software.
++ *
++ *	Permission is granted to anyone to use this software for any
++ *	purpose on any computer system, and to redistribute it freely,
++ *	subject to the following restrictions:
++ *
++ *	1. The author is not responsible for the consequences of use of
++ *		this software, no matter how awful, even if they arise
++ *		from defects in it.
++ *
++ *	2. The origin of this software must not be misrepresented, either
++ *		by explicit claim or by omission.
++ *
++ *	3. Altered versions must be plainly marked as such, and must not
++ *		be misrepresented as being the original software.
++ *
++ *
++ * This code was modified by Ethan Sommer to work within the kernel
++ * (it now uses kmalloc etc..)
++ *
++ */
++#include "regexp.h"
++#include "regmagic.h"
++#include <linux/string.h>
++
++
++#ifndef CHARBITS
++#define	UCHARAT(p)	((int)*(unsigned char *)(p))
++#else
++#define	UCHARAT(p)	((int)*(p)&CHARBITS)
++#endif
++
++#if 0
++//void regerror(char * s)
++//{
++//        printk("regexp(3): %s", s);
++//        /* NOTREACHED */
++//}
++#endif
++
++/*
++ - regsub - perform substitutions after a regexp match
++ */
++void
++regsub(regexp * prog, char * source, char * dest)
++{
++	register char *src;
++	register char *dst;
++	register char c;
++	register int no;
++	register int len;
++	
++	/* Not necessary and gcc doesn't like it -MLS */
++	/*extern char *strncpy();*/
++
++	if (prog == NULL || source == NULL || dest == NULL) {
++		regerror("NULL parm to regsub");
++		return;
++	}
++	if (UCHARAT(prog->program) != MAGIC) {
++		regerror("damaged regexp fed to regsub");
++		return;
++	}
++
++	src = source;
++	dst = dest;
++	while ((c = *src++) != '\0') {
++		if (c == '&')
++			no = 0;
++		else if (c == '\\' && '0' <= *src && *src <= '9')
++			no = *src++ - '0';
++		else
++			no = -1;
++
++		if (no < 0) {	/* Ordinary character. */
++			if (c == '\\' && (*src == '\\' || *src == '&'))
++				c = *src++;
++			*dst++ = c;
++		} else if (prog->startp[no] != NULL && prog->endp[no] != NULL) {
++			len = prog->endp[no] - prog->startp[no];
++			(void) strncpy(dst, prog->startp[no], len);
++			dst += len;
++			if (len != 0 && *(dst-1) == '\0') {	/* strncpy hit NUL. */
++				regerror("damaged match string");
++				return;
++			}
++		}
++	}
++	*dst++ = '\0';
++}
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/101-netfilter_layer7_pktmatch.patch
@@ -1,1 +1,109 @@
+diff -urN linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_layer7.h linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_layer7.h
+--- linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_layer7.h	2007-05-26 20:17:47.624408296 +0200
++++ linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_layer7.h	2007-05-26 20:17:48.729240336 +0200
+@@ -21,6 +21,7 @@
+     char protocol[MAX_PROTOCOL_LEN];
+     char invert:1;
+     char pattern[MAX_PATTERN_LEN];
++    char pkt;
+ };
+ 
+ #endif /* _IPT_LAYER7_H */
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/ipt_layer7.c linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_layer7.c
+--- linux-2.6.21.1.old/net/ipv4/netfilter/ipt_layer7.c	2007-05-26 20:17:47.626407992 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_layer7.c	2007-05-26 20:17:48.729240336 +0200
+@@ -296,33 +296,34 @@
+ 	}
+ }
+ 
+-/* add the new app data to the conntrack.  Return number of bytes added. */
+-static int add_data(struct ip_conntrack * master_conntrack,
+-			char * app_data, int appdatalen)
++static int add_datastr(char *target, int offset, char *app_data, int len)
+ {
+ 	int length = 0, i;
+-	int oldlength = master_conntrack->layer7.app_data_len;
+-
+-	// This is a fix for a race condition by Deti Fliegl. However, I'm not 
+-	// clear on whether the race condition exists or whether this really 
+-	// fixes it.  I might just be being dense... Anyway, if it's not really 
+-	// a fix, all it does is waste a very small amount of time.
+-	if(!master_conntrack->layer7.app_data) return 0;
++	if(!target) return 0;
+ 
+ 	/* Strip nulls. Make everything lower case (our regex lib doesn't
+ 	do case insensitivity).  Add it to the end of the current data. */
+-	for(i = 0; i < maxdatalen-oldlength-1 &&
+-		   i < appdatalen; i++) {
++	for(i = 0; i < maxdatalen-offset-1 && i < len; i++) {
+ 		if(app_data[i] != '\0') {
+-			master_conntrack->layer7.app_data[length+oldlength] =
++			target[length+offset] =
+ 				/* the kernel version of tolower mungs 'upper ascii' */
+ 				isascii(app_data[i])? tolower(app_data[i]) : app_data[i];
+ 			length++;
+ 		}
+ 	}
++	target[length+offset] = '\0';
+ 
+-	master_conntrack->layer7.app_data[length+oldlength] = '\0';
+-	master_conntrack->layer7.app_data_len = length + oldlength;
++	return length;
++}
++
++/* add the new app data to the conntrack.  Return number of bytes added. */
++static int add_data(struct ip_conntrack * master_conntrack,
++			char * app_data, int appdatalen)
++{
++	int length;
++
++	length = add_datastr(master_conntrack->layer7.app_data, master_conntrack->layer7.app_data_len, app_data, appdatalen);
++	master_conntrack->layer7.app_data_len += length;
+ 
+ 	return length;
+ }
+@@ -339,7 +340,7 @@
+ 	struct ipt_layer7_info * info = (struct ipt_layer7_info *)matchinfo;
+ 	enum ip_conntrack_info master_ctinfo, ctinfo;
+ 	struct ip_conntrack *master_conntrack, *conntrack;
+-	unsigned char * app_data;
++	unsigned char *app_data, *tmp_data;
+ 	unsigned int pattern_result, appdatalen;
+ 	regexp * comppattern;
+ 
+@@ -362,8 +363,8 @@
+ 		master_conntrack = master_ct(master_conntrack);
+ 
+ 	/* if we've classified it or seen too many packets */
+-	if(TOTAL_PACKETS > num_packets ||
+-	   master_conntrack->layer7.app_proto) {
++	if(!info->pkt && (TOTAL_PACKETS > num_packets ||
++		master_conntrack->layer7.app_proto)) {
+ 
+ 		pattern_result = match_no_append(conntrack, master_conntrack, ctinfo, master_ctinfo, info);
+ 
+@@ -394,6 +395,23 @@
+ 	comppattern = compile_and_cache(info->pattern, info->protocol);
+ 	spin_unlock_bh(&list_lock);
+ 
++	if (info->pkt) {
++		tmp_data = kmalloc(maxdatalen, GFP_ATOMIC);
++		if(!tmp_data){
++			if (net_ratelimit())
++				printk(KERN_ERR "layer7: out of memory in match, bailing.\n");
++			return info->invert;
++		}
++
++		tmp_data[0] = '\0';
++		add_datastr(tmp_data, 0, app_data, appdatalen);
++		pattern_result = ((comppattern && regexec(comppattern, tmp_data)) ? 1 : 0);
++		kfree(tmp_data);
++		tmp_data = NULL;
++
++		return (pattern_result ^ info->invert);
++	}
++
+ 	/* On the first packet of a connection, allocate space for app data */
+ 	write_lock(&ct_lock);
+ 	if(TOTAL_PACKETS == 1 && !skb->cb[0] && !master_conntrack->layer7.app_data) {
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/110-ipp2p_0.8.1rc1.patch
@@ -1,1 +1,949 @@
+diff -urN linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_ipp2p.h linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_ipp2p.h
+--- linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_ipp2p.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_ipp2p.h	2007-05-26 20:21:54.586864296 +0200
+@@ -0,0 +1,31 @@
++#ifndef __IPT_IPP2P_H
++#define __IPT_IPP2P_H
++#define IPP2P_VERSION "0.8.1_rc1"
++
++struct ipt_p2p_info {
++    int cmd;
++    int debug;
++};
++
++#endif //__IPT_IPP2P_H
++
++#define SHORT_HAND_IPP2P	1 /* --ipp2p switch*/
++//#define SHORT_HAND_DATA		4 /* --ipp2p-data switch*/
++#define SHORT_HAND_NONE		5 /* no short hand*/
++
++#define IPP2P_EDK		(1 << 1)
++#define IPP2P_DATA_KAZAA	(1 << 2)
++#define IPP2P_DATA_EDK		(1 << 3)
++#define IPP2P_DATA_DC		(1 << 4)
++#define IPP2P_DC		(1 << 5)
++#define IPP2P_DATA_GNU		(1 << 6)
++#define IPP2P_GNU		(1 << 7)
++#define IPP2P_KAZAA		(1 << 8)
++#define IPP2P_BIT		(1 << 9)
++#define IPP2P_APPLE		(1 << 10)
++#define IPP2P_SOUL		(1 << 11)
++#define IPP2P_WINMX		(1 << 12)
++#define IPP2P_ARES		(1 << 13)
++#define IPP2P_MUTE		(1 << 14)
++#define IPP2P_WASTE		(1 << 15)
++#define IPP2P_XDCC		(1 << 16)
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/ipt_ipp2p.c linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_ipp2p.c
+--- linux-2.6.21.1.old/net/ipv4/netfilter/ipt_ipp2p.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_ipp2p.c	2007-05-26 20:21:54.587864144 +0200
+@@ -0,0 +1,881 @@
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#include <linux/module.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/version.h>
++#include <linux/netfilter_ipv4/ipt_ipp2p.h>
++#include <net/tcp.h>
++#include <net/udp.h>
++
++#define get_u8(X,O)  (*(__u8 *)(X + O))
++#define get_u16(X,O)  (*(__u16 *)(X + O))
++#define get_u32(X,O)  (*(__u32 *)(X + O))
++
++MODULE_AUTHOR("Eicke Friedrich/Klaus Degner <ipp2p@ipp2p.org>");
++MODULE_DESCRIPTION("An extension to iptables to identify P2P traffic.");
++MODULE_LICENSE("GPL");
++
++
++/*Search for UDP eDonkey/eMule/Kad commands*/
++int
++udp_search_edk (unsigned char *haystack, int packet_len)
++{
++    unsigned char *t = haystack;
++    t += 8;
++
++	switch (t[0]) {
++		case 0xe3:
++		{	/*edonkey*/
++			switch (t[1])
++			{
++				/* client -> server status request */
++				case 0x96:
++					if (packet_len == 14) return ((IPP2P_EDK * 100) + 50);
++					break;
++				/* server -> client status request */
++				case 0x97: if (packet_len == 42) return ((IPP2P_EDK * 100) + 51);
++					break;
++						/* server description request */
++						/* e3 2a ff f0 .. | size == 6 */
++				case 0xa2: if ( (packet_len == 14) && ( get_u16(t,2) == __constant_htons(0xfff0) ) ) return ((IPP2P_EDK * 100) + 52);
++					break;
++						/* server description response */
++						/* e3 a3 ff f0 ..  | size > 40 && size < 200 */
++				//case 0xa3: return ((IPP2P_EDK * 100) + 53);
++				//	break;
++				case 0x9a: if (packet_len==26) return ((IPP2P_EDK * 100) + 54);
++					break;
++
++				case 0x92: if (packet_len==18) return ((IPP2P_EDK * 100) + 55);
++					break;
++			}
++			break;
++		}
++		case 0xe4:
++		{
++			switch (t[1])
++			{
++						/* e4 20 .. | size == 43 */
++				case 0x20: if ((packet_len == 43) && (t[2] != 0x00) && (t[34] != 0x00)) return ((IPP2P_EDK * 100) + 60);
++					break;
++						/* e4 00 .. 00 | size == 35 ? */
++				case 0x00: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 61);
++					break;
++						/* e4 10 .. 00 | size == 35 ? */
++				case 0x10: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 62);
++					break;
++						/* e4 18 .. 00 | size == 35 ? */
++				case 0x18: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 63);
++					break;
++						/* e4 52 .. | size = 44 */
++				case 0x52: if (packet_len == 44 ) return ((IPP2P_EDK * 100) + 64);
++					break;
++						/* e4 58 .. | size == 6 */
++				case 0x58: if (packet_len == 14 ) return ((IPP2P_EDK * 100) + 65);
++					break;
++						/* e4 59 .. | size == 2 */
++				case 0x59: if (packet_len == 10 )return ((IPP2P_EDK * 100) + 66);
++					break;
++					/* e4 28 .. | packet_len == 52,77,102,127... */
++				case 0x28: if (((packet_len-52) % 25) == 0) return ((IPP2P_EDK * 100) + 67);
++					break;
++					/* e4 50 xx xx | size == 4 */
++				case 0x50: if (packet_len == 12) return ((IPP2P_EDK * 100) + 68);
++					break;
++					/* e4 40 xx xx | size == 48 */
++				case 0x40: if (packet_len == 56) return ((IPP2P_EDK * 100) + 69);
++					break;
++			}
++			break;
++		}
++	} /* end of switch (t[0]) */
++    return 0;
++}/*udp_search_edk*/
++
++
++/*Search for UDP Gnutella commands*/
++int
++udp_search_gnu (unsigned char *haystack, int packet_len)
++{
++    unsigned char *t = haystack;
++    t += 8;
++
++    if (memcmp(t, "GND", 3) == 0) return ((IPP2P_GNU * 100) + 51);
++    if (memcmp(t, "GNUTELLA ", 9) == 0) return ((IPP2P_GNU * 100) + 52);
++    return 0;
++}/*udp_search_gnu*/
++
++
++/*Search for UDP KaZaA commands*/
++int
++udp_search_kazaa (unsigned char *haystack, int packet_len)
++{
++    unsigned char *t = haystack;
++
++    if (t[packet_len-1] == 0x00){
++	t += (packet_len - 6);
++	if (memcmp(t, "KaZaA", 5) == 0) return (IPP2P_KAZAA * 100 +50);
++    }
++
++    return 0;
++}/*udp_search_kazaa*/
++
++/*Search for UDP DirectConnect commands*/
++int
++udp_search_directconnect (unsigned char *haystack, int packet_len)
++{
++    unsigned char *t = haystack;
++    if ((*(t + 8) == 0x24) && (*(t + packet_len - 1) == 0x7c)) {
++    	t+=8;
++    	if (memcmp(t, "SR ", 3) == 0)	 		return ((IPP2P_DC * 100) + 60);
++    	if (memcmp(t, "Ping ", 5) == 0)	 		return ((IPP2P_DC * 100) + 61);
++    }
++    return 0;
++}/*udp_search_directconnect*/
++
++
++
++/*Search for UDP BitTorrent commands*/
++int
++udp_search_bit (unsigned char *haystack, int packet_len)
++{
++	switch(packet_len)
++	{
++		case 24:
++			/* ^ 00 00 04 17 27 10 19 80 */
++			if ((ntohl(get_u32(haystack, 8)) == 0x00000417) && (ntohl(get_u32(haystack, 12)) == 0x27101980))
++				return (IPP2P_BIT * 100 + 50);
++			break;
++		case 44:
++			if (get_u32(haystack, 16) == __constant_htonl(0x00000400) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
++				return (IPP2P_BIT * 100 + 51);
++			if (get_u32(haystack, 16) == __constant_htonl(0x00000400))
++				return (IPP2P_BIT * 100 + 61);
++			break;
++		case 65:
++			if (get_u32(haystack, 16) == __constant_htonl(0x00000404) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
++				return (IPP2P_BIT * 100 + 52);
++			if (get_u32(haystack, 16) == __constant_htonl(0x00000404))
++				return (IPP2P_BIT * 100 + 62);
++			break;
++		case 67:
++			if (get_u32(haystack, 16) == __constant_htonl(0x00000406) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
++				return (IPP2P_BIT * 100 + 53);
++			if (get_u32(haystack, 16) == __constant_htonl(0x00000406))
++				return (IPP2P_BIT * 100 + 63);
++			break;
++		case 211:
++			if (get_u32(haystack, 8) == __constant_htonl(0x00000405))
++				return (IPP2P_BIT * 100 + 54);
++			break;
++		case 29:
++			if ((get_u32(haystack, 8) == __constant_htonl(0x00000401)))
++				return (IPP2P_BIT * 100 + 55);
++			break;
++		case 52:
++			if (get_u32(haystack,8)  == __constant_htonl(0x00000827) &&
++			get_u32(haystack,12) == __constant_htonl(0x37502950))
++				return (IPP2P_BIT * 100 + 80);
++			break;
++		default:
++			/* this packet does not have a constant size */
++			if (packet_len >= 40 && get_u32(haystack, 16) == __constant_htonl(0x00000402) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
++				return (IPP2P_BIT * 100 + 56);
++			break;
++	}
++
++	/* some extra-bitcomet rules:
++	* "d1:" [a|r] "d2:id20:"
++	*/
++	if (packet_len > 30 && get_u8(haystack, 8) == 'd' && get_u8(haystack, 9) == '1' && get_u8(haystack, 10) == ':' )
++	{
++		if (get_u8(haystack, 11) == 'a' || get_u8(haystack, 11) == 'r')
++		{
++			if (memcmp(haystack+12,"d2:id20:",8)==0)
++				return (IPP2P_BIT * 100 + 57);
++		}
++	}
++
++#if 0
++	/* bitlord rules */
++	/* packetlen must be bigger than 40 */
++	/* first 4 bytes are zero */
++	if (packet_len > 40 && get_u32(haystack, 8) == 0x00000000)
++	{
++		/* first rule: 00 00 00 00 01 00 00 xx xx xx xx 00 00 00 00*/
++		if (get_u32(haystack, 12) == 0x00000000 &&
++		    get_u32(haystack, 16) == 0x00010000 &&
++		    get_u32(haystack, 24) == 0x00000000 )
++			return (IPP2P_BIT * 100 + 71);
++
++		/* 00 01 00 00 0d 00 00 xx xx xx xx 00 00 00 00*/
++		if (get_u32(haystack, 12) == 0x00000001 &&
++		    get_u32(haystack, 16) == 0x000d0000 &&
++		    get_u32(haystack, 24) == 0x00000000 )
++			return (IPP2P_BIT * 100 + 71);
++
++
++	}
++#endif
++
++    return 0;
++}/*udp_search_bit*/
++
++
++
++/*Search for Ares commands*/
++//#define IPP2P_DEBUG_ARES
++int
++search_ares (const unsigned char *payload, const u16 plen)
++//int search_ares (unsigned char *haystack, int packet_len, int head_len)
++{
++//	const unsigned char *t = haystack + head_len;
++
++	/* all ares packets start with  */
++	if (payload[1] == 0 && (plen - payload[0]) == 3)
++	{
++		switch (payload[2])
++		{
++			case 0x5a:
++				/* ares connect */
++				if ( plen == 6 && payload[5] == 0x05 ) return ((IPP2P_ARES * 100) + 1);
++				break;
++			case 0x09:
++				/* ares search, min 3 chars --> 14 bytes
++				 * lets define a search can be up to 30 chars --> max 34 bytes
++				 */
++				if ( plen >= 14 && plen <= 34 ) return ((IPP2P_ARES * 100) + 1);
++				break;
++#ifdef IPP2P_DEBUG_ARES
++			default:
++			printk(KERN_DEBUG "Unknown Ares command %x recognized, len: %u \n", (unsigned int) payload[2],plen);
++#endif /* IPP2P_DEBUG_ARES */
++		}
++	}
++
++#if 0
++	/* found connect packet: 03 00 5a 04 03 05 */
++	/* new version ares 1.8: 03 00 5a xx xx 05 */
++    if ((plen) == 6){	/* possible connect command*/
++	if ((payload[0] == 0x03) && (payload[1] == 0x00) && (payload[2] == 0x5a) && (payload[5] == 0x05))
++	    return ((IPP2P_ARES * 100) + 1);
++    }
++    if ((plen) == 60){	/* possible download command*/
++	if ((payload[59] == 0x0a) && (payload[58] == 0x0a)){
++	    if (memcmp(t, "PUSH SHA1:", 10) == 0) /* found download command */
++	    	return ((IPP2P_ARES * 100) + 2);
++	}
++    }
++#endif
++
++    return 0;
++} /*search_ares*/
++
++/*Search for SoulSeek commands*/
++int
++search_soul (const unsigned char *payload, const u16 plen)
++{
++//#define IPP2P_DEBUG_SOUL
++    /* match: xx xx xx xx | xx = sizeof(payload) - 4 */
++    if (get_u32(payload, 0) == (plen - 4)){
++	const __u32 m=get_u32(payload, 4);
++	/* match 00 yy yy 00, yy can be everything */
++        if ( get_u8(payload, 4) == 0x00 && get_u8(payload, 7) == 0x00 )
++	{
++#ifdef IPP2P_DEBUG_SOUL
++	printk(KERN_DEBUG "0: Soulseek command 0x%x recognized\n",get_u32(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 1);
++	}
++
++        /* next match: 01 yy 00 00 | yy can be everything */
++        if ( get_u8(payload, 4) == 0x01 && get_u16(payload, 6) == 0x0000 )
++	{
++#ifdef IPP2P_DEBUG_SOUL
++	printk(KERN_DEBUG "1: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 2);
++	}
++
++	/* other soulseek commandos are: 1-5,7,9,13-18,22,23,26,28,35-37,40-46,50,51,60,62-69,91,92,1001 */
++	/* try to do this in an intelligent way */
++	/* get all small commandos */
++	switch(m)
++	{
++		case 7:
++		case 9:
++		case 22:
++		case 23:
++		case 26:
++		case 28:
++		case 50:
++		case 51:
++		case 60:
++		case 91:
++		case 92:
++		case 1001:
++#ifdef IPP2P_DEBUG_SOUL
++		printk(KERN_DEBUG "2: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 3);
++	}
++
++	if (m > 0 && m < 6 )
++	{
++#ifdef IPP2P_DEBUG_SOUL
++		printk(KERN_DEBUG "3: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 4);
++	}
++	if (m > 12 && m < 19 )
++	{
++#ifdef IPP2P_DEBUG_SOUL
++		printk(KERN_DEBUG "4: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 5);
++	}
++
++	if (m > 34 && m < 38 )
++	{
++#ifdef IPP2P_DEBUG_SOUL
++		printk(KERN_DEBUG "5: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 6);
++	}
++
++	if (m > 39 && m < 47 )
++	{
++#ifdef IPP2P_DEBUG_SOUL
++		printk(KERN_DEBUG "6: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 7);
++	}
++
++	if (m > 61 && m < 70 )
++	{
++#ifdef IPP2P_DEBUG_SOUL
++		printk(KERN_DEBUG "7: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 8);
++	}
++
++#ifdef IPP2P_DEBUG_SOUL
++	printk(KERN_DEBUG "unknown SOULSEEK command: 0x%x, first 16 bit: 0x%x, first 8 bit: 0x%x ,soulseek ???\n",get_u32(payload, 4),get_u16(payload, 4) >> 16,get_u8(payload, 4) >> 24);
++#endif /* IPP2P_DEBUG_SOUL */
++    }
++
++	/* match 14 00 00 00 01 yy 00 00 00 STRING(YY) 01 00 00 00 00 46|50 00 00 00 00 */
++	/* without size at the beginning !!! */
++	if ( get_u32(payload, 0) == 0x14 && get_u8(payload, 4) == 0x01 )
++	{
++		__u32 y=get_u32(payload, 5);
++		/* we need 19 chars + string */
++		if ( (y + 19) <= (plen) )
++		{
++			const unsigned char *w=payload+9+y;
++			if (get_u32(w, 0) == 0x01 && ( get_u16(w, 4) == 0x4600 || get_u16(w, 4) == 0x5000) && get_u32(w, 6) == 0x00);
++#ifdef IPP2P_DEBUG_SOUL
++	    		printk(KERN_DEBUG "Soulssek special client command recognized\n");
++#endif /* IPP2P_DEBUG_SOUL */
++	    		return ((IPP2P_SOUL * 100) + 9);
++		}
++	}
++    return 0;
++}
++
++
++/*Search for WinMX commands*/
++int
++search_winmx (const unsigned char *payload, const u16 plen)
++{
++//#define IPP2P_DEBUG_WINMX
++    if (((plen) == 4) && (memcmp(payload, "SEND", 4) == 0))  return ((IPP2P_WINMX * 100) + 1);
++    if (((plen) == 3) && (memcmp(payload, "GET", 3) == 0))  return ((IPP2P_WINMX * 100) + 2);
++    //if (packet_len < (head_len + 10)) return 0;
++    if (plen < 10) return 0;
++
++    if ((memcmp(payload, "SEND", 4) == 0) || (memcmp(payload, "GET", 3) == 0)){
++        u16 c=4;
++        const u16 end=plen-2;
++        u8 count=0;
++        while (c < end)
++        {
++        	if (payload[c]== 0x20 && payload[c+1] == 0x22)
++        	{
++        		c++;
++        		count++;
++        		if (count>=2) return ((IPP2P_WINMX * 100) + 3);
++        	}
++        	c++;
++        }
++    }
++
++    if ( plen == 149 && payload[0] == '8' )
++    {
++#ifdef IPP2P_DEBUG_WINMX
++    	printk(KERN_INFO "maybe WinMX\n");
++#endif
++    	if (get_u32(payload,17) == 0 && get_u32(payload,21) == 0 && get_u32(payload,25) == 0 &&
++//    	    get_u32(payload,33) == __constant_htonl(0x71182b1a) && get_u32(payload,37) == __constant_htonl(0x05050000) &&
++//    	    get_u32(payload,133) == __constant_htonl(0x31097edf) && get_u32(payload,145) == __constant_htonl(0xdcb8f792))
++    	    get_u16(payload,39) == 0 && get_u16(payload,135) == __constant_htons(0x7edf) && get_u16(payload,147) == __constant_htons(0xf792))
++
++    	{
++#ifdef IPP2P_DEBUG_WINMX
++    		printk(KERN_INFO "got WinMX\n");
++#endif
++    		return ((IPP2P_WINMX * 100) + 4);
++    	}
++    }
++    return 0;
++} /*search_winmx*/
++
++
++/*Search for appleJuice commands*/
++int
++search_apple (const unsigned char *payload, const u16 plen)
++{
++    if ( (plen > 7) && (payload[6] == 0x0d) && (payload[7] == 0x0a) && (memcmp(payload, "ajprot", 6) == 0))  return (IPP2P_APPLE * 100);
++
++    return 0;
++}
++
++
++/*Search for BitTorrent commands*/
++int
++search_bittorrent (const unsigned char *payload, const u16 plen)
++{
++    if (plen > 20)
++    {
++	/* test for match 0x13+"BitTorrent protocol" */
++	if (payload[0] == 0x13)
++	{
++		if (memcmp(payload+1, "BitTorrent protocol", 19) == 0) return (IPP2P_BIT * 100);
++	}
++
++	/* get tracker commandos, all starts with GET /
++	* then it can follow: scrape| announce
++	* and then ?hash_info=
++	*/
++	if (memcmp(payload,"GET /",5) == 0)
++	{
++		/* message scrape */
++		if ( memcmp(payload+5,"scrape?info_hash=",17)==0 ) return (IPP2P_BIT * 100 + 1);
++		/* message announce */
++		if ( memcmp(payload+5,"announce?info_hash=",19)==0 ) return (IPP2P_BIT * 100 + 2);
++	}
++    }
++    else
++    {
++    	/* bitcomet encryptes the first packet, so we have to detect another
++    	 * one later in the flow */
++    	 /* first try failed, too many missdetections */
++    	//if ( size == 5 && get_u32(t,0) == __constant_htonl(1) && t[4] < 3) return (IPP2P_BIT * 100 + 3);
++
++    	/* second try: block request packets */
++    	if ( plen == 17 && get_u32(payload,0) == __constant_htonl(0x0d) && payload[4] == 0x06 && get_u32(payload,13) == __constant_htonl(0x4000) ) return (IPP2P_BIT * 100 + 3);
++    }
++
++    return 0;
++}
++
++
++
++/*check for Kazaa get command*/
++int
++search_kazaa (const unsigned char *payload, const u16 plen)
++
++{
++    if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a) && memcmp(payload, "GET /.hash=", 11) == 0)
++	return (IPP2P_DATA_KAZAA * 100);
++
++    return 0;
++}
++
++
++/*check for gnutella get command*/
++int
++search_gnu (const unsigned char *payload, const u16 plen)
++{
++    if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
++    {
++	if (memcmp(payload, "GET /get/", 9) == 0)	return ((IPP2P_DATA_GNU * 100) + 1);
++	if (memcmp(payload, "GET /uri-res/", 13) == 0) return ((IPP2P_DATA_GNU * 100) + 2);
++    }
++    return 0;
++}
++
++
++/*check for gnutella get commands and other typical data*/
++int
++search_all_gnu (const unsigned char *payload, const u16 plen)
++{
++
++    if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
++    {
++
++	if (memcmp(payload, "GNUTELLA CONNECT/", 17) == 0) return ((IPP2P_GNU * 100) + 1);
++	if (memcmp(payload, "GNUTELLA/", 9) == 0) return ((IPP2P_GNU * 100) + 2);
++
++
++	if ((memcmp(payload, "GET /get/", 9) == 0) || (memcmp(payload, "GET /uri-res/", 13) == 0))
++	{
++		u16 c=8;
++		const u16 end=plen-22;
++		while (c < end) {
++			if ( payload[c] == 0x0a && payload[c+1] == 0x0d && ((memcmp(&payload[c+2], "X-Gnutella-", 11) == 0) || (memcmp(&payload[c+2], "X-Queue:", 8) == 0)))
++				return ((IPP2P_GNU * 100) + 3);
++			c++;
++		}
++	}
++    }
++    return 0;
++}
++
++
++/*check for KaZaA download commands and other typical data*/
++int
++search_all_kazaa (const unsigned char *payload, const u16 plen)
++{
++    if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
++    {
++
++	if (memcmp(payload, "GIVE ", 5) == 0) return ((IPP2P_KAZAA * 100) + 1);
++
++    	if (memcmp(payload, "GET /", 5) == 0) {
++		u16 c = 8;
++		const u16 end=plen-22;
++		while (c < end) {
++			if ( payload[c] == 0x0a && payload[c+1] == 0x0d && ((memcmp(&payload[c+2], "X-Kazaa-Username: ", 18) == 0) || (memcmp(&payload[c+2], "User-Agent: PeerEnabler/", 24) == 0)))
++				return ((IPP2P_KAZAA * 100) + 2);
++			c++;
++		}
++	}
++    }
++    return 0;
++}
++
++/*fast check for edonkey file segment transfer command*/
++int
++search_edk (const unsigned char *payload, const u16 plen)
++{
++    if (payload[0] != 0xe3)
++	return 0;
++    else {
++	if (payload[5] == 0x47)
++	    return (IPP2P_DATA_EDK * 100);
++	else
++	    return 0;
++    }
++}
++
++
++
++/*intensive but slower search for some edonkey packets including size-check*/
++int
++search_all_edk (const unsigned char *payload, const u16 plen)
++{
++    if (payload[0] != 0xe3)
++	return 0;
++    else {
++	//t += head_len;
++	const u16 cmd = get_u16(payload, 1);
++	if (cmd == (plen - 5)) {
++	    switch (payload[5]) {
++		case 0x01: return ((IPP2P_EDK * 100) + 1);	/*Client: hello or Server:hello*/
++		case 0x4c: return ((IPP2P_EDK * 100) + 9);	/*Client: Hello-Answer*/
++	    }
++	}
++	return 0;
++     }
++}
++
++
++/*fast check for Direct Connect send command*/
++int
++search_dc (const unsigned char *payload, const u16 plen)
++{
++
++    if (payload[0] != 0x24 )
++	return 0;
++    else {
++	if (memcmp(&payload[1], "Send|", 5) == 0)
++	    return (IPP2P_DATA_DC * 100);
++	else
++	    return 0;
++    }
++
++}
++
++
++/*intensive but slower check for all direct connect packets*/
++int
++search_all_dc (const unsigned char *payload, const u16 plen)
++{
++//    unsigned char *t = haystack;
++
++    if (payload[0] == 0x24 && payload[plen-1] == 0x7c)
++    {
++    	const unsigned char *t=&payload[1];
++    		/* Client-Hub-Protocol */
++	if (memcmp(t, "Lock ", 5) == 0)	 		return ((IPP2P_DC * 100) + 1);
++	/* Client-Client-Protocol, some are already recognized by client-hub (like lock) */
++	if (memcmp(t, "MyNick ", 7) == 0)	 	return ((IPP2P_DC * 100) + 38);
++    }
++    return 0;
++}
++
++/*check for mute*/
++int
++search_mute (const unsigned char *payload, const u16 plen)
++{
++	if ( plen == 209 || plen == 345 || plen == 473 || plen == 609 || plen == 1121 )
++	{
++		//printk(KERN_DEBUG "size hit: %u",size);
++		if (memcmp(payload,"PublicKey: ",11) == 0 )
++		{
++			return ((IPP2P_MUTE * 100) + 0);
++
++/*			if (memcmp(t+size-14,"\x0aEndPublicKey\x0a",14) == 0)
++			{
++				printk(KERN_DEBUG "end pubic key hit: %u",size);
++
++			}*/
++		}
++	}
++	return 0;
++}
++
++
++/* check for xdcc */
++int
++search_xdcc (const unsigned char *payload, const u16 plen)
++{
++	/* search in small packets only */
++	if (plen > 20 && plen < 200 && payload[plen-1] == 0x0a && payload[plen-2] == 0x0d && memcmp(payload,"PRIVMSG ",8) == 0)
++	{
++
++		u16 x=10;
++		const u16 end=plen - 13;
++
++		/* is seems to be a irc private massage, chedck for xdcc command */
++		while (x < end)
++		{
++			if (payload[x] == ':')
++			{
++				if ( memcmp(&payload[x+1],"xdcc send #",11) == 0 )
++					return ((IPP2P_XDCC * 100) + 0);
++			}
++			x++;
++		}
++	}
++	return 0;
++}
++
++/* search for waste */
++int search_waste(const unsigned char *payload, const u16 plen)
++{
++	if ( plen >= 8 && memcmp(payload,"GET.sha1:",9) == 0)
++		return ((IPP2P_WASTE * 100) + 0);
++
++	return 0;
++}
++
++
++static struct {
++    int command;
++    __u8 short_hand;			/*for fucntions included in short hands*/
++    int packet_len;
++    int (*function_name) (const unsigned char *, const u16);
++} matchlist[] = {
++    {IPP2P_EDK,SHORT_HAND_IPP2P,20, &search_all_edk},
++//    {IPP2P_DATA_KAZAA,SHORT_HAND_DATA,200, &search_kazaa},
++//    {IPP2P_DATA_EDK,SHORT_HAND_DATA,60, &search_edk},
++//    {IPP2P_DATA_DC,SHORT_HAND_DATA,26, &search_dc},
++    {IPP2P_DC,SHORT_HAND_IPP2P,5, search_all_dc},
++//    {IPP2P_DATA_GNU,SHORT_HAND_DATA,40, &search_gnu},
++    {IPP2P_GNU,SHORT_HAND_IPP2P,5, &search_all_gnu},
++    {IPP2P_KAZAA,SHORT_HAND_IPP2P,5, &search_all_kazaa},
++    {IPP2P_BIT,SHORT_HAND_IPP2P,20, &search_bittorrent},
++    {IPP2P_APPLE,SHORT_HAND_IPP2P,5, &search_apple},
++    {IPP2P_SOUL,SHORT_HAND_IPP2P,5, &search_soul},
++    {IPP2P_WINMX,SHORT_HAND_IPP2P,2, &search_winmx},
++    {IPP2P_ARES,SHORT_HAND_IPP2P,5, &search_ares},
++    {IPP2P_MUTE,SHORT_HAND_NONE,200, &search_mute},
++    {IPP2P_WASTE,SHORT_HAND_NONE,5, &search_waste},
++    {IPP2P_XDCC,SHORT_HAND_NONE,5, &search_xdcc},
++    {0,0,0,NULL}
++};
++
++
++static struct {
++    int command;
++    __u8 short_hand;			/*for fucntions included in short hands*/
++    int packet_len;
++    int (*function_name) (unsigned char *, int);
++} udp_list[] = {
++    {IPP2P_KAZAA,SHORT_HAND_IPP2P,14, &udp_search_kazaa},
++    {IPP2P_BIT,SHORT_HAND_IPP2P,23, &udp_search_bit},
++    {IPP2P_GNU,SHORT_HAND_IPP2P,11, &udp_search_gnu},
++    {IPP2P_EDK,SHORT_HAND_IPP2P,9, &udp_search_edk},
++    {IPP2P_DC,SHORT_HAND_IPP2P,12, &udp_search_directconnect},
++    {0,0,0,NULL}
++};
++
++
++static int
++match(const struct sk_buff *skb,
++      const struct net_device *in,
++      const struct net_device *out,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++      const struct xt_match *match,
++#endif
++      const void *matchinfo,
++      int offset,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++      unsigned int protoff,
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++      const void *hdr,
++      u_int16_t datalen,
++#endif
++      int *hotdrop)
++{
++    const struct ipt_p2p_info *info = matchinfo;
++    unsigned char  *haystack;
++    struct iphdr *ip = ip_hdr(skb);
++    int p2p_result = 0, i = 0;
++//    int head_len;
++    int hlen = ntohs(ip->tot_len)-(ip->ihl*4);	/*hlen = packet-data length*/
++
++    /*must not be a fragment*/
++    if (offset) {
++	if (info->debug) printk("IPP2P.match: offset found %i \n",offset);
++	return 0;
++    }
++
++    /*make sure that skb is linear*/
++    if(skb_is_nonlinear(skb)){
++	if (info->debug) printk("IPP2P.match: nonlinear skb found\n");
++	return 0;
++    }
++
++
++    haystack=(char *)ip+(ip->ihl*4);		/*haystack = packet data*/
++
++    switch (ip->protocol){
++	case IPPROTO_TCP:		/*what to do with a TCP packet*/
++	{
++	    struct tcphdr *tcph = (void *) ip + ip->ihl * 4;
++
++	    if (tcph->fin) return 0;  /*if FIN bit is set bail out*/
++	    if (tcph->syn) return 0;  /*if SYN bit is set bail out*/
++	    if (tcph->rst) return 0;  /*if RST bit is set bail out*/
++
++	    haystack += tcph->doff * 4; /*get TCP-Header-Size*/
++	    hlen -= tcph->doff * 4;
++	    while (matchlist[i].command) {
++		if ((((info->cmd & matchlist[i].command) == matchlist[i].command) ||
++		    ((info->cmd & matchlist[i].short_hand) == matchlist[i].short_hand)) &&
++		    (hlen > matchlist[i].packet_len)) {
++			    p2p_result = matchlist[i].function_name(haystack, hlen);
++			    if (p2p_result)
++			    {
++				if (info->debug) printk("IPP2P.debug:TCP-match: %i from: %u.%u.%u.%u:%i to: %u.%u.%u.%u:%i Length: %i\n",
++				    p2p_result, NIPQUAD(ip->saddr),ntohs(tcph->source), NIPQUAD(ip->daddr),ntohs(tcph->dest),hlen);
++				return p2p_result;
++    			    }
++    		}
++	    i++;
++	    }
++	    return p2p_result;
++	}
++
++	case IPPROTO_UDP:		/*what to do with an UDP packet*/
++	{
++	    struct udphdr *udph = (void *) ip + ip->ihl * 4;
++
++	    while (udp_list[i].command){
++		if ((((info->cmd & udp_list[i].command) == udp_list[i].command) ||
++		    ((info->cmd & udp_list[i].short_hand) == udp_list[i].short_hand)) &&
++		    (hlen > udp_list[i].packet_len)) {
++			    p2p_result = udp_list[i].function_name(haystack, hlen);
++			    if (p2p_result){
++				if (info->debug) printk("IPP2P.debug:UDP-match: %i from: %u.%u.%u.%u:%i to: %u.%u.%u.%u:%i Length: %i\n",
++				    p2p_result, NIPQUAD(ip->saddr),ntohs(udph->source), NIPQUAD(ip->daddr),ntohs(udph->dest),hlen);
++				return p2p_result;
++			    }
++		}
++	    i++;
++	    }
++	    return p2p_result;
++	}
++
++	default: return 0;
++    }
++}
++
++
++
++static int
++checkentry(const char *tablename,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++	    const void *ip,
++	    const struct xt_match *match,
++#else
++            const struct ipt_ip *ip,
++#endif
++	    void *matchinfo,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++	    unsigned int matchsize,
++#endif
++	    unsigned int hook_mask)
++{
++        /* Must specify -p tcp */
++/*    if (ip->proto != IPPROTO_TCP || (ip->invflags & IPT_INV_PROTO)) {
++ *	printk("ipp2p: Only works on TCP packets, use -p tcp\n");
++ *	return 0;
++ *    }*/
++    return 1;
++}
++
++
++
++
++static struct ipt_match ipp2p_match = {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++	{ NULL, NULL },
++	"ipp2p",
++	&match,
++	&checkentry,
++	NULL,
++	THIS_MODULE
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++	.name		= "ipp2p",
++	.match		= &match,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++	.matchsize	= sizeof(struct ipt_p2p_info),
++#endif
++	.checkentry	= &checkentry,
++	.me		= THIS_MODULE,
++#endif
++};
++
++
++static int __init init(void)
++{
++    printk(KERN_INFO "IPP2P v%s loading\n", IPP2P_VERSION);
++    return xt_register_match(&ipp2p_match);
++}
++
++static void __exit fini(void)
++{
++    xt_unregister_match(&ipp2p_match);
++    printk(KERN_INFO "IPP2P v%s unloaded\n", IPP2P_VERSION);
++}
++
++module_init(init);
++module_exit(fini);
++
++
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/Kconfig linux-2.6.21.1.dev/net/ipv4/netfilter/Kconfig
+--- linux-2.6.21.1.old/net/ipv4/netfilter/Kconfig	2007-05-26 20:17:47.626407992 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/Kconfig	2007-05-26 20:21:54.587864144 +0200
+@@ -81,6 +81,12 @@
+ 	help
+ 	  Say Y to get lots of debugging output.
+ 
++config IP_NF_MATCH_IPP2P
++	tristate "IPP2P"
++	depends on IP_NF_IPTABLES
++	help
++	  Module for matching traffic of various Peer-to-Peer applications
++
+ config IP_NF_MATCH_TOS
+ 	tristate "TOS match support"
+ 	depends on IP_NF_IPTABLES
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/Makefile linux-2.6.21.1.dev/net/ipv4/netfilter/Makefile
+--- linux-2.6.21.1.old/net/ipv4/netfilter/Makefile	2007-05-26 20:17:47.638406168 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/Makefile	2007-05-26 20:21:54.588863992 +0200
+@@ -49,7 +49,7 @@
+ obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
+ obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
+ obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
+-
++obj-$(CONFIG_IP_NF_MATCH_IPP2P) += ipt_ipp2p.o
+ obj-$(CONFIG_IP_NF_MATCH_LAYER7) += ipt_layer7.o
+ 
+ # targets
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/120-openswan-2.4.0.kernel-2.6-natt.patch
@@ -1,1 +1,170 @@
+diff -urN linux-2.6.21.1.old/include/net/xfrmudp.h linux-2.6.21.1.dev/include/net/xfrmudp.h
+--- linux-2.6.21.1.old/include/net/xfrmudp.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/net/xfrmudp.h	2007-05-26 20:24:53.933599448 +0200
+@@ -0,0 +1,10 @@
++/*
++ * pointer to function for type that xfrm4_input wants, to permit
++ * decoupling of XFRM from udp.c
++ */
++#define HAVE_XFRM4_UDP_REGISTER
++
++typedef int (*xfrm4_rcv_encap_t)(struct sk_buff *skb, __u16 encap_type);
++extern int udp4_register_esp_rcvencap(xfrm4_rcv_encap_t func
++				      , xfrm4_rcv_encap_t *oldfunc);
++extern int udp4_unregister_esp_rcvencap(xfrm4_rcv_encap_t func);
+diff -urN linux-2.6.21.1.old/net/ipv4/Kconfig linux-2.6.21.1.dev/net/ipv4/Kconfig
+--- linux-2.6.21.1.old/net/ipv4/Kconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/ipv4/Kconfig	2007-05-26 20:24:53.965594584 +0200
+@@ -266,6 +266,12 @@
+ 	  Network), but can be distributed all over the Internet. If you want
+ 	  to do that, say Y here and to "IP multicast routing" below.
+ 
++config IPSEC_NAT_TRAVERSAL
++	bool "IPSEC NAT-Traversal (KLIPS compatible)"
++	depends on INET
++	---help---
++          Includes support for RFC3947/RFC3948 NAT-Traversal of ESP over UDP.
++
+ config IP_MROUTE
+ 	bool "IP: multicast routing"
+ 	depends on IP_MULTICAST
+diff -urN linux-2.6.21.1.old/net/ipv4/udp.c linux-2.6.21.1.dev/net/ipv4/udp.c
+--- linux-2.6.21.1.old/net/ipv4/udp.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/ipv4/udp.c	2007-05-26 20:24:53.966594432 +0200
+@@ -101,12 +101,15 @@
+ #include <net/route.h>
+ #include <net/checksum.h>
+ #include <net/xfrm.h>
++#include <net/xfrmudp.h>
+ #include "udp_impl.h"
+ 
+ /*
+  *	Snmp MIB for the UDP layer
+  */
+ 
++static xfrm4_rcv_encap_t xfrm4_rcv_encap_func;
++
+ DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;
+ 
+ struct hlist_head udp_hash[UDP_HTABLE_SIZE];
+@@ -1008,6 +1011,42 @@
+ 	return 0;
+ }
+ 
++#if defined(CONFIG_XFRM) || defined(CONFIG_IPSEC_NAT_TRAVERSAL)
++
++/* if XFRM isn't a module, then register it directly. */
++#if 0 && !defined(CONFIG_XFRM_MODULE) && !defined(CONFIG_IPSEC_NAT_TRAVERSAL)
++static xfrm4_rcv_encap_t xfrm4_rcv_encap_func = xfrm4_rcv_encap;
++#else
++static xfrm4_rcv_encap_t xfrm4_rcv_encap_func = NULL;
++#endif
++
++int udp4_register_esp_rcvencap(xfrm4_rcv_encap_t func
++			       , xfrm4_rcv_encap_t *oldfunc)
++{
++  if(oldfunc != NULL) {
++    *oldfunc = xfrm4_rcv_encap_func;
++  }
++
++#if 0
++  if(xfrm4_rcv_encap_func != NULL)
++    return -1;
++#endif
++
++  xfrm4_rcv_encap_func = func;
++  return 0;
++}
++
++int udp4_unregister_esp_rcvencap(xfrm4_rcv_encap_t func)
++{
++  if(xfrm4_rcv_encap_func != func)
++    return -1;
++
++  xfrm4_rcv_encap_func = NULL;
++  return 0;
++}
++#endif /* CONFIG_XFRM_MODULE || CONFIG_IPSEC_NAT_TRAVERSAL */
++
++
+ /* return:
+  * 	1  if the UDP system should process it
+  *	0  if we should drop this packet
+@@ -1015,7 +1054,7 @@
+  */
+ static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
+ {
+-#ifndef CONFIG_XFRM
++#if !defined(CONFIG_XFRM) && !defined(CONFIG_IPSEC_NAT_TRAVERSAL)
+ 	return 1;
+ #else
+ 	struct udp_sock *up = udp_sk(sk);
+@@ -1030,11 +1069,11 @@
+ 	/* if we're overly short, let UDP handle it */
+ 	len = skb->len - sizeof(struct udphdr);
+ 	if (len <= 0)
+-		return 1;
++		return 2;
+ 
+ 	/* if this is not encapsulated socket, then just return now */
+ 	if (!encap_type)
+-		return 1;
++		return 3;
+ 
+ 	/* If this is a paged skb, make sure we pull up
+ 	 * whatever data we need to look at. */
+@@ -1057,7 +1096,7 @@
+ 			len = sizeof(struct udphdr);
+ 		} else
+ 			/* Must be an IKE packet.. pass it through */
+-			return 1;
++			return 4;
+ 		break;
+ 	case UDP_ENCAP_ESPINUDP_NON_IKE:
+ 		/* Check if this is a keepalive packet.  If so, eat it. */
+@@ -1070,7 +1109,7 @@
+ 			len = sizeof(struct udphdr) + 2 * sizeof(u32);
+ 		} else
+ 			/* Must be an IKE packet.. pass it through */
+-			return 1;
++			return 5;
+ 		break;
+ 	}
+ 
+@@ -1081,6 +1120,8 @@
+ 	 */
+ 	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ 		return 0;
++	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
++		return 0;
+ 
+ 	/* Now we can update and verify the packet length... */
+ 	iph = ip_hdr(skb);
+@@ -1145,9 +1186,13 @@
+ 			return 0;
+ 		}
+ 		if (ret < 0) {
+-			/* process the ESP packet */
+-			ret = xfrm4_rcv_encap(skb, up->encap_type);
+-			UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
++			if(xfrm4_rcv_encap_func != NULL) {
++			  ret = (*xfrm4_rcv_encap_func)(skb, up->encap_type);
++			  UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
++			} else {
++			  UDP_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag);
++			  ret = 1;
++			}
+ 			return -ret;
+ 		}
+ 		/* FALLTHROUGH -- it's a UDP Packet */
+@@ -1847,3 +1892,9 @@
+ EXPORT_SYMBOL(udp_proc_register);
+ EXPORT_SYMBOL(udp_proc_unregister);
+ #endif
++
++#if defined(CONFIG_IPSEC_NAT_TRAVERSAL)
++EXPORT_SYMBOL(udp4_register_esp_rcvencap);
++EXPORT_SYMBOL(udp4_unregister_esp_rcvencap);
++#endif
++
 

--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/130-netfilter-ipset.patch
@@ -1,1 +1,6626 @@
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set.h	2007-06-08 16:29:31.825808000 -0500
+@@ -0,0 +1,498 @@
++#ifndef _IP_SET_H
++#define _IP_SET_H
++
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ *                         Patrick Schaaf <bof@bof.de>
++ *                         Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++#if 0
++#define IP_SET_DEBUG
++#endif
++
++/*
++ * A sockopt of such quality has hardly ever been seen before on the open
++ * market!  This little beauty, hardly ever used: above 64, so it's
++ * traditionally used for firewalling, not touched (even once!) by the
++ * 2.0, 2.2 and 2.4 kernels!
++ *
++ * Comes with its own certificate of authenticity, valid anywhere in the
++ * Free world!
++ *
++ * Rusty, 19.4.2000
++ */
++#define SO_IP_SET 		83
++
++/*
++ * Heavily modify by Joakim Axelsson 08.03.2002
++ * - Made it more modulebased
++ *
++ * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
++ * - bindings added
++ * - in order to "deal with" backward compatibility, renamed to ipset
++ */
++
++/* 
++ * Used so that the kernel module and ipset-binary can match their versions 
++ */
++#define IP_SET_PROTOCOL_VERSION 2
++
++#define IP_SET_MAXNAMELEN 32	/* set names and set typenames */
++
++/* Lets work with our own typedef for representing an IP address.
++ * We hope to make the code more portable, possibly to IPv6...
++ *
++ * The representation works in HOST byte order, because most set types
++ * will perform arithmetic operations and compare operations.
++ * 
++ * For now the type is an uint32_t.
++ *
++ * Make sure to ONLY use the functions when translating and parsing
++ * in order to keep the host byte order and make it more portable:
++ *  parse_ip()
++ *  parse_mask()
++ *  parse_ipandmask()
++ *  ip_tostring()
++ * (Joakim: where are they???)
++ */
++
++typedef uint32_t ip_set_ip_t;
++
++/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
++ * and IP_SET_INVALID_ID if you want to increase the max number of sets.
++ */
++typedef uint16_t ip_set_id_t;
++
++#define IP_SET_INVALID_ID	65535
++
++/* How deep we follow bindings */
++#define IP_SET_MAX_BINDINGS	6
++
++/*
++ * Option flags for kernel operations (ipt_set_info)
++ */
++#define IPSET_SRC 		0x01	/* Source match/add */
++#define IPSET_DST		0x02	/* Destination match/add */
++#define IPSET_MATCH_INV		0x04	/* Inverse matching */
++
++/*
++ * Set features
++ */
++#define IPSET_TYPE_IP		0x01	/* IP address type of set */
++#define IPSET_TYPE_PORT		0x02	/* Port type of set */
++#define IPSET_DATA_SINGLE	0x04	/* Single data storage */
++#define IPSET_DATA_DOUBLE	0x08	/* Double data storage */
++
++/* Reserved keywords */
++#define IPSET_TOKEN_DEFAULT	":default:"
++#define IPSET_TOKEN_ALL		":all:"
++
++/* SO_IP_SET operation constants, and their request struct types.
++ *
++ * Operation ids:
++ *	  0-99:	 commands with version checking
++ *	100-199: add/del/test/bind/unbind
++ *	200-299: list, save, restore
++ */
++
++/* Single shot operations: 
++ * version, create, destroy, flush, rename and swap 
++ *
++ * Sets are identified by name.
++ */
++
++#define IP_SET_REQ_STD		\
++	unsigned op;		\
++	unsigned version;	\
++	char name[IP_SET_MAXNAMELEN]
++
++#define IP_SET_OP_CREATE	0x00000001	/* Create a new (empty) set */
++struct ip_set_req_create {
++	IP_SET_REQ_STD;
++	char typename[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_OP_DESTROY	0x00000002	/* Remove a (empty) set */
++struct ip_set_req_std {
++	IP_SET_REQ_STD;
++};
++
++#define IP_SET_OP_FLUSH		0x00000003	/* Remove all IPs in a set */
++/* Uses ip_set_req_std */
++
++#define IP_SET_OP_RENAME	0x00000004	/* Rename a set */
++/* Uses ip_set_req_create */
++
++#define IP_SET_OP_SWAP		0x00000005	/* Swap two sets */
++/* Uses ip_set_req_create */
++
++union ip_set_name_index {
++	char name[IP_SET_MAXNAMELEN];
++	ip_set_id_t index;
++};
++
++#define IP_SET_OP_GET_BYNAME	0x00000006	/* Get set index by name */
++struct ip_set_req_get_set {
++	unsigned op;
++	unsigned version;
++	union ip_set_name_index set;
++};
++
++#define IP_SET_OP_GET_BYINDEX	0x00000007	/* Get set name by index */
++/* Uses ip_set_req_get_set */
++
++#define IP_SET_OP_VERSION	0x00000100	/* Ask kernel version */
++struct ip_set_req_version {
++	unsigned op;
++	unsigned version;
++};
++
++/* Double shots operations: 
++ * add, del, test, bind and unbind.
++ *
++ * First we query the kernel to get the index and type of the target set,
++ * then issue the command. Validity of IP is checked in kernel in order
++ * to minimalize sockopt operations.
++ */
++
++/* Get minimal set data for add/del/test/bind/unbind IP */
++#define IP_SET_OP_ADT_GET	0x00000010	/* Get set and type */
++struct ip_set_req_adt_get {
++	unsigned op;
++	unsigned version;
++	union ip_set_name_index set;
++	char typename[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_REQ_BYINDEX	\
++	unsigned op;		\
++	ip_set_id_t index;
++
++struct ip_set_req_adt {
++	IP_SET_REQ_BYINDEX;
++};
++
++#define IP_SET_OP_ADD_IP	0x00000101	/* Add an IP to a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_DEL_IP	0x00000102	/* Remove an IP from a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_TEST_IP	0x00000103	/* Test an IP in a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_BIND_SET	0x00000104	/* Bind an IP to a set */
++/* Uses ip_set_req_bind, with type specific addage */
++struct ip_set_req_bind {
++	IP_SET_REQ_BYINDEX;
++	char binding[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_OP_UNBIND_SET	0x00000105	/* Unbind an IP from a set */
++/* Uses ip_set_req_bind, with type speficic addage 
++ * index = 0 means unbinding for all sets */
++
++#define IP_SET_OP_TEST_BIND_SET	0x00000106	/* Test binding an IP to a set */
++/* Uses ip_set_req_bind, with type specific addage */
++
++/* Multiple shots operations: list, save, restore.
++ *
++ * - check kernel version and query the max number of sets
++ * - get the basic information on all sets
++ *   and size required for the next step
++ * - get actual set data: header, data, bindings
++ */
++
++/* Get max_sets and the index of a queried set
++ */
++#define IP_SET_OP_MAX_SETS	0x00000020
++struct ip_set_req_max_sets {
++	unsigned op;
++	unsigned version;
++	ip_set_id_t max_sets;		/* max_sets */
++	ip_set_id_t sets;		/* real number of sets */
++	union ip_set_name_index set;	/* index of set if name used */
++};
++
++/* Get the id and name of the sets plus size for next step */
++#define IP_SET_OP_LIST_SIZE	0x00000201
++#define IP_SET_OP_SAVE_SIZE	0x00000202
++struct ip_set_req_setnames {
++	unsigned op;
++	ip_set_id_t index;		/* set to list/save */
++	size_t size;			/* size to get setdata/bindings */
++	/* followed by sets number of struct ip_set_name_list */
++};
++
++struct ip_set_name_list {
++	char name[IP_SET_MAXNAMELEN];
++	char typename[IP_SET_MAXNAMELEN];
++	ip_set_id_t index;
++	ip_set_id_t id;
++};
++
++/* The actual list operation */
++#define IP_SET_OP_LIST		0x00000203
++struct ip_set_req_list {
++	IP_SET_REQ_BYINDEX;
++	/* sets number of struct ip_set_list in reply */ 
++};
++
++struct ip_set_list {
++	ip_set_id_t index;
++	ip_set_id_t binding;
++	u_int32_t ref;
++	size_t header_size;	/* Set header data of header_size */
++	size_t members_size;	/* Set members data of members_size */
++	size_t bindings_size;	/* Set bindings data of bindings_size */
++};
++
++struct ip_set_hash_list {
++	ip_set_ip_t ip;
++	ip_set_id_t binding;
++};
++
++/* The save operation */
++#define IP_SET_OP_SAVE		0x00000204
++/* Uses ip_set_req_list, in the reply replaced by
++ * sets number of struct ip_set_save plus a marker
++ * ip_set_save followed by ip_set_hash_save structures.
++ */
++struct ip_set_save {
++	ip_set_id_t index;
++	ip_set_id_t binding;
++	size_t header_size;	/* Set header data of header_size */
++	size_t members_size;	/* Set members data of members_size */
++};
++
++/* At restoring, ip == 0 means default binding for the given set: */
++struct ip_set_hash_save {
++	ip_set_ip_t ip;
++	ip_set_id_t id;
++	ip_set_id_t binding;
++};
++
++/* The restore operation */
++#define IP_SET_OP_RESTORE	0x00000205
++/* Uses ip_set_req_setnames followed by ip_set_restore structures
++ * plus a marker ip_set_restore, followed by ip_set_hash_save 
++ * structures.
++ */
++struct ip_set_restore {
++	char name[IP_SET_MAXNAMELEN];
++	char typename[IP_SET_MAXNAMELEN];
++	ip_set_id_t index;
++	size_t header_size;	/* Create data of header_size */
++	size_t members_size;	/* Set members data of members_size */
++};
++
++static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
++{
++	return 4 * ((((b - a + 8) / 8) + 3) / 4);
++}
++
++#ifdef __KERNEL__
++
++#define ip_set_printk(format, args...) 			\
++	do {							\
++		printk("%s: %s: ", __FILE__, __FUNCTION__);	\
++		printk(format "\n" , ## args);			\
++	} while (0)
++
++#if defined(IP_SET_DEBUG)
++#define DP(format, args...) 					\
++	do {							\
++		printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
++		printk(format "\n" , ## args);			\
++	} while (0)
++#define IP_SET_ASSERT(x)					\
++	do {							\
++		if (!(x))					\
++			printk("IP_SET_ASSERT: %s:%i(%s)\n",	\
++				__FILE__, __LINE__, __FUNCTION__); \
++	} while (0)
++#else
++#define DP(format, args...)
++#define IP_SET_ASSERT(x)
++#endif
++
++struct ip_set;
++
++/*
++ * The ip_set_type definition - one per set type, e.g. "ipmap".
++ *
++ * Each individual set has a pointer, set->type, going to one
++ * of these structures. Function pointers inside the structure implement
++ * the real behaviour of the sets.
++ *
++ * If not mentioned differently, the implementation behind the function
++ * pointers of a set_type, is expected to return 0 if ok, and a negative
++ * errno (e.g. -EINVAL) on error.
++ */
++struct ip_set_type {
++	struct list_head list;	/* next in list of set types */
++
++	/* test for IP in set (kernel: iptables -m set src|dst)
++	 * return 0 if not in set, 1 if in set.
++	 */
++	int (*testip_kernel) (struct ip_set *set,
++			      const struct sk_buff * skb, 
++			      ip_set_ip_t *ip,
++			      const u_int32_t *flags,
++			      unsigned char index);
++
++	/* test for IP in set (userspace: ipset -T set IP)
++	 * return 0 if not in set, 1 if in set.
++	 */
++	int (*testip) (struct ip_set *set,
++		       const void *data, size_t size,
++		       ip_set_ip_t *ip);
++
++	/*
++	 * Size of the data structure passed by when
++	 * adding/deleting/testing an entry.
++	 */
++	size_t reqsize;
++
++	/* Add IP into set (userspace: ipset -A set IP)
++	 * Return -EEXIST if the address is already in the set,
++	 * and -ERANGE if the address lies outside the set bounds.
++	 * If the address was not already in the set, 0 is returned.
++	 */
++	int (*addip) (struct ip_set *set, 
++		      const void *data, size_t size,
++		      ip_set_ip_t *ip);
++
++	/* Add IP into set (kernel: iptables ... -j SET set src|dst)
++	 * Return -EEXIST if the address is already in the set,
++	 * and -ERANGE if the address lies outside the set bounds.
++	 * If the address was not already in the set, 0 is returned.
++	 */
++	int (*addip_kernel) (struct ip_set *set,
++			     const struct sk_buff * skb, 
++			     ip_set_ip_t *ip,
++			     const u_int32_t *flags,
++			     unsigned char index);
++
++	/* remove IP from set (userspace: ipset -D set --entry x)
++	 * Return -EEXIST if the address is NOT in the set,
++	 * and -ERANGE if the address lies outside the set bounds.
++	 * If the address really was in the set, 0 is returned.
++	 */
++	int (*delip) (struct ip_set *set, 
++		      const void *data, size_t size,
++		      ip_set_ip_t *ip);
++
++	/* remove IP from set (kernel: iptables ... -j SET --entry x)
++	 * Return -EEXIST if the address is NOT in the set,
++	 * and -ERANGE if the address lies outside the set bounds.
++	 * If the address really was in the set, 0 is returned.
++	 */
++	int (*delip_kernel) (struct ip_set *set,
++			     const struct sk_buff * skb, 
++			     ip_set_ip_t *ip,
++			     const u_int32_t *flags,
++			     unsigned char index);
++
++	/* new set creation - allocated type specific items
++	 */
++	int (*create) (struct ip_set *set,
++		       const void *data, size_t size);
++
++	/* retry the operation after successfully tweaking the set
++	 */
++	int (*retry) (struct ip_set *set);
++
++	/* set destruction - free type specific items
++	 * There is no return value.
++	 * Can be called only when child sets are destroyed.
++	 */
++	void (*destroy) (struct ip_set *set);
++
++	/* set flushing - reset all bits in the set, or something similar.
++	 * There is no return value.
++	 */
++	void (*flush) (struct ip_set *set);
++
++	/* Listing: size needed for header
++	 */
++	size_t header_size;
++
++	/* Listing: Get the header
++	 *
++	 * Fill in the information in "data".
++	 * This function is always run after list_header_size() under a 
++	 * writelock on the set. Therefore the length of "data" is 
++	 * always correct. 
++	 */
++	void (*list_header) (const struct ip_set *set, 
++			     void *data);
++
++	/* Listing: Get the size for the set members
++	 */
++	int (*list_members_size) (const struct ip_set *set);
++
++	/* Listing: Get the set members
++	 *
++	 * Fill in the information in "data".
++	 * This function is always run after list_member_size() under a 
++	 * writelock on the set. Therefore the length of "data" is 
++	 * always correct. 
++	 */
++	void (*list_members) (const struct ip_set *set,
++			      void *data);
++
++	char typename[IP_SET_MAXNAMELEN];
++	unsigned char features;
++	int protocol_version;
++
++	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
++	struct module *me;
++};
++
++extern int ip_set_register_set_type(struct ip_set_type *set_type);
++extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
++
++/* A generic ipset */
++struct ip_set {
++	char name[IP_SET_MAXNAMELEN];	/* the name of the set */
++	rwlock_t lock;			/* lock for concurrency control */
++	ip_set_id_t id;			/* set id for swapping */
++	ip_set_id_t binding;		/* default binding for the set */
++	atomic_t ref;			/* in kernel and in hash references */
++	struct ip_set_type *type; 	/* the set types */
++	void *data;			/* pooltype specific data */
++};
++
++/* Structure to bind set elements to sets */
++struct ip_set_hash {
++	struct list_head list;		/* list of clashing entries in hash */
++	ip_set_ip_t ip;			/* ip from set */
++	ip_set_id_t id;			/* set id */
++	ip_set_id_t binding;		/* set we bind the element to */
++};
++
++/* register and unregister set references */
++extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
++extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
++extern void ip_set_put(ip_set_id_t id);
++
++/* API for iptables set match, and SET target */
++extern void ip_set_addip_kernel(ip_set_id_t id,
++				const struct sk_buff *skb,
++				const u_int32_t *flags);
++extern void ip_set_delip_kernel(ip_set_id_t id,
++				const struct sk_buff *skb,
++				const u_int32_t *flags);
++extern int ip_set_testip_kernel(ip_set_id_t id,
++				const struct sk_buff *skb,
++				const u_int32_t *flags);
++
++#endif				/* __KERNEL__ */
++
++#endif /*_IP_SET_H*/
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iphash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iphash.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iphash.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iphash.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,30 @@
++#ifndef __IP_SET_IPHASH_H
++#define __IP_SET_IPHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "iphash"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_iphash {
++	ip_set_ip_t *members;		/* the iphash proper */
++	uint32_t elements;		/* number of elements */
++	uint32_t hashsize;		/* hash size */
++	uint16_t probes;		/* max number of probes  */
++	uint16_t resize;		/* resize factor in percent */
++	ip_set_ip_t netmask;		/* netmask */
++	void *initval[0];		/* initvals for jhash_1word */
++};
++
++struct ip_set_req_iphash_create {
++	uint32_t hashsize;
++	uint16_t probes;
++	uint16_t resize;
++	ip_set_ip_t netmask;
++};
++
++struct ip_set_req_iphash {
++	ip_set_ip_t ip;
++};
++
++#endif	/* __IP_SET_IPHASH_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipmap.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipmap.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipmap.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipmap.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,56 @@
++#ifndef __IP_SET_IPMAP_H
++#define __IP_SET_IPMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "ipmap"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_ipmap {
++	void *members;			/* the ipmap proper */
++	ip_set_ip_t first_ip;		/* host byte order, included in range */
++	ip_set_ip_t last_ip;		/* host byte order, included in range */
++	ip_set_ip_t netmask;		/* subnet netmask */
++	ip_set_ip_t sizeid;		/* size of set in IPs */
++	ip_set_ip_t hosts;		/* number of hosts in a subnet */
++};
++
++struct ip_set_req_ipmap_create {
++	ip_set_ip_t from;
++	ip_set_ip_t to;
++	ip_set_ip_t netmask;
++};
++
++struct ip_set_req_ipmap {
++	ip_set_ip_t ip;
++};
++
++unsigned int
++mask_to_bits(ip_set_ip_t mask)
++{
++	unsigned int bits = 32;
++	ip_set_ip_t maskaddr;
++	
++	if (mask == 0xFFFFFFFF)
++		return bits;
++	
++	maskaddr = 0xFFFFFFFE;
++	while (--bits >= 0 && maskaddr != mask)
++		maskaddr <<= 1;
++	
++	return bits;
++}
++
++ip_set_ip_t
++range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
++{
++	ip_set_ip_t mask = 0xFFFFFFFE;
++	
++	*bits = 32;
++	while (--(*bits) >= 0 && mask && (to & mask) != from)
++		mask <<= 1;
++		
++	return mask;
++}
++	
++#endif /* __IP_SET_IPMAP_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipporthash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipporthash.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipporthash.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipporthash.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,34 @@
++#ifndef __IP_SET_IPPORTHASH_H
++#define __IP_SET_IPPORTHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "ipporthash"
++#define MAX_RANGE 0x0000FFFF
++#define INVALID_PORT	(MAX_RANGE + 1)
++
++struct ip_set_ipporthash {
++	ip_set_ip_t *members;		/* the ipporthash proper */
++	uint32_t elements;		/* number of elements */
++	uint32_t hashsize;		/* hash size */
++	uint16_t probes;		/* max number of probes  */
++	uint16_t resize;		/* resize factor in percent */
++	ip_set_ip_t first_ip;		/* host byte order, included in range */
++	ip_set_ip_t last_ip;		/* host byte order, included in range */
++	void *initval[0];		/* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipporthash_create {
++	uint32_t hashsize;
++	uint16_t probes;
++	uint16_t resize;
++	ip_set_ip_t from;
++	ip_set_ip_t to;
++};
++
++struct ip_set_req_ipporthash {
++	ip_set_ip_t ip;
++	ip_set_ip_t port;
++};
++
++#endif	/* __IP_SET_IPPORTHASH_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iptree.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iptree.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iptree.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iptree.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,40 @@
++#ifndef __IP_SET_IPTREE_H
++#define __IP_SET_IPTREE_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "iptree"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_iptreed {
++	unsigned long expires[256];	   	/* x.x.x.ADDR */
++};
++
++struct ip_set_iptreec {
++	struct ip_set_iptreed *tree[256];	/* x.x.ADDR.* */
++};
++
++struct ip_set_iptreeb {
++	struct ip_set_iptreec *tree[256];	/* x.ADDR.*.* */
++};
++
++struct ip_set_iptree {
++	unsigned int timeout;
++	unsigned int gc_interval;
++#ifdef __KERNEL__
++	uint32_t elements;		/* number of elements */
++	struct timer_list gc;
++	struct ip_set_iptreeb *tree[256];	/* ADDR.*.*.* */
++#endif
++};
++
++struct ip_set_req_iptree_create {
++	unsigned int timeout;
++};
++
++struct ip_set_req_iptree {
++	ip_set_ip_t ip;
++	unsigned int timeout;
++};
++
++#endif	/* __IP_SET_IPTREE_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_jhash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_jhash.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_jhash.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_jhash.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,148 @@
++#ifndef _LINUX_IPSET_JHASH_H
++#define _LINUX_IPSET_JHASH_H
++
++/* This is a copy of linux/jhash.h but the types u32/u8 are changed
++ * to __u32/__u8 so that the header file can be included into
++ * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
++ */
++
++/* jhash.h: Jenkins hash support.
++ *
++ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
++ *
++ * http://burtleburtle.net/bob/hash/
++ *
++ * These are the credits from Bob's sources:
++ *
++ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
++ * hash(), hash2(), hash3, and mix() are externally useful functions.
++ * Routines to test the hash are included if SELF_TEST is defined.
++ * You can use this free for any purpose.  It has no warranty.
++ *
++ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
++ *
++ * I've modified Bob's hash to be useful in the Linux kernel, and
++ * any bugs present are surely my fault.  -DaveM
++ */
++
++/* NOTE: Arguments are modified. */
++#define __jhash_mix(a, b, c) \
++{ \
++  a -= b; a -= c; a ^= (c>>13); \
++  b -= c; b -= a; b ^= (a<<8); \
++  c -= a; c -= b; c ^= (b>>13); \
++  a -= b; a -= c; a ^= (c>>12);  \
++  b -= c; b -= a; b ^= (a<<16); \
++  c -= a; c -= b; c ^= (b>>5); \
++  a -= b; a -= c; a ^= (c>>3);  \
++  b -= c; b -= a; b ^= (a<<10); \
++  c -= a; c -= b; c ^= (b>>15); \
++}
++
++/* The golden ratio: an arbitrary value */
++#define JHASH_GOLDEN_RATIO	0x9e3779b9
++
++/* The most generic version, hashes an arbitrary sequence
++ * of bytes.  No alignment or length assumptions are made about
++ * the input key.
++ */
++static inline __u32 jhash(void *key, __u32 length, __u32 initval)
++{
++	__u32 a, b, c, len;
++	__u8 *k = key;
++
++	len = length;
++	a = b = JHASH_GOLDEN_RATIO;
++	c = initval;
++
++	while (len >= 12) {
++		a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
++		b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
++		c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
++
++		__jhash_mix(a,b,c);
++
++		k += 12;
++		len -= 12;
++	}
++
++	c += length;
++	switch (len) {
++	case 11: c += ((__u32)k[10]<<24);
++	case 10: c += ((__u32)k[9]<<16);
++	case 9 : c += ((__u32)k[8]<<8);
++	case 8 : b += ((__u32)k[7]<<24);
++	case 7 : b += ((__u32)k[6]<<16);
++	case 6 : b += ((__u32)k[5]<<8);
++	case 5 : b += k[4];
++	case 4 : a += ((__u32)k[3]<<24);
++	case 3 : a += ((__u32)k[2]<<16);
++	case 2 : a += ((__u32)k[1]<<8);
++	case 1 : a += k[0];
++	};
++
++	__jhash_mix(a,b,c);
++
++	return c;
++}
++
++/* A special optimized version that handles 1 or more of __u32s.
++ * The length parameter here is the number of __u32s in the key.
++ */
++static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
++{
++	__u32 a, b, c, len;
++
++	a = b = JHASH_GOLDEN_RATIO;
++	c = initval;
++	len = length;
++
++	while (len >= 3) {
++		a += k[0];
++		b += k[1];
++		c += k[2];
++		__jhash_mix(a, b, c);
++		k += 3; len -= 3;
++	}
++
++	c += length * 4;
++
++	switch (len) {
++	case 2 : b += k[1];
++	case 1 : a += k[0];
++	};
++
++	__jhash_mix(a,b,c);
++
++	return c;
++}
++
++
++/* A special ultra-optimized versions that knows they are hashing exactly
++ * 3, 2 or 1 word(s).
++ *
++ * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
++ *       done at the end is not done here.
++ */
++static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
++{
++	a += JHASH_GOLDEN_RATIO;
++	b += JHASH_GOLDEN_RATIO;
++	c += initval;
++
++	__jhash_mix(a, b, c);
++
++	return c;
++}
++
++static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
++{
++	return jhash_3words(a, b, 0, initval);
++}
++
++static inline __u32 jhash_1word(__u32 a, __u32 initval)
++{
++	return jhash_3words(a, 0, 0, initval);
++}
++
++#endif /* _LINUX_IPSET_JHASH_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_macipmap.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_macipmap.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_macipmap.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_macipmap.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,38 @@
++#ifndef __IP_SET_MACIPMAP_H
++#define __IP_SET_MACIPMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "macipmap"
++#define MAX_RANGE 0x0000FFFF
++
++/* general flags */
++#define IPSET_MACIP_MATCHUNSET	1
++
++/* per ip flags */
++#define IPSET_MACIP_ISSET	1
++
++struct ip_set_macipmap {
++	void *members;			/* the macipmap proper */
++	ip_set_ip_t first_ip;		/* host byte order, included in range */
++	ip_set_ip_t last_ip;		/* host byte order, included in range */
++	u_int32_t flags;
++};
++
++struct ip_set_req_macipmap_create {
++	ip_set_ip_t from;
++	ip_set_ip_t to;
++	u_int32_t flags;
++};
++
++struct ip_set_req_macipmap {
++	ip_set_ip_t ip;
++	unsigned char ethernet[ETH_ALEN];
++};
++
++struct ip_set_macip {
++	unsigned short flags;
++	unsigned char ethernet[ETH_ALEN];
++};
++
++#endif	/* __IP_SET_MACIPMAP_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_malloc.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_malloc.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_malloc.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_malloc.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,116 @@
++#ifndef _IP_SET_MALLOC_H
++#define _IP_SET_MALLOC_H
++
++#ifdef __KERNEL__
++
++/* Memory allocation and deallocation */
++static size_t max_malloc_size = 0;
++
++static inline void init_max_malloc_size(void)
++{
++#define CACHE(x) max_malloc_size = x;
++#include <linux/kmalloc_sizes.h>
++#undef CACHE
++}
++
++static inline void * ip_set_malloc(size_t bytes)
++{
++	if (bytes > max_malloc_size)
++		return vmalloc(bytes);
++	else
++		return kmalloc(bytes, GFP_KERNEL);
++}
++
++static inline void ip_set_free(void * data, size_t bytes)
++{
++	if (bytes > max_malloc_size)
++		vfree(data);
++	else
++		kfree(data);
++}
++
++struct harray {
++	size_t max_elements;
++	void *arrays[0];
++};
++
++static inline void * 
++harray_malloc(size_t hashsize, size_t typesize, int flags)
++{
++	struct harray *harray;
++	size_t max_elements, size, i, j;
++
++	if (!max_malloc_size)
++		init_max_malloc_size();
++
++	if (typesize > max_malloc_size)
++		return NULL;
++
++	max_elements = max_malloc_size/typesize;
++	size = hashsize/max_elements;
++	if (hashsize % max_elements)
++		size++;
++	
++	/* Last pointer signals end of arrays */
++	harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
++			 flags);
++
++	if (!harray)
++		return NULL;
++	
++	for (i = 0; i < size - 1; i++) {
++		harray->arrays[i] = kmalloc(max_elements * typesize, flags);
++		if (!harray->arrays[i])
++			goto undo;
++		memset(harray->arrays[i], 0, max_elements * typesize);
++	}
++	harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize, 
++				    flags);
++	if (!harray->arrays[i])
++		goto undo;
++	memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
++
++	harray->max_elements = max_elements;
++	harray->arrays[size] = NULL;
++	
++	return (void *)harray;
++
++    undo:
++    	for (j = 0; j < i; j++) {
++    		kfree(harray->arrays[j]);
++    	}
++    	kfree(harray);
++    	return NULL;
++}
++
++static inline void harray_free(void *h)
++{
++	struct harray *harray = (struct harray *) h;
++	size_t i;
++	
++    	for (i = 0; harray->arrays[i] != NULL; i++)
++    		kfree(harray->arrays[i]);
++    	kfree(harray);
++}
++
++static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
++{
++	struct harray *harray = (struct harray *) h;
++	size_t i;
++	
++    	for (i = 0; harray->arrays[i+1] != NULL; i++)
++		memset(harray->arrays[i], 0, harray->max_elements * typesize);
++	memset(harray->arrays[i], 0, 
++	       (hashsize - i * harray->max_elements) * typesize);
++}
++
++#define HARRAY_ELEM(h, type, which)				\
++({								\
++	struct harray *__h = (struct harray *)(h);		\
++	((type)((__h)->arrays[(which)/(__h)->max_elements])	\
++		+ (which)%(__h)->max_elements);			\
++})
++
++#endif				/* __KERNEL__ */
++
++#endif /*_IP_SET_MALLOC_H*/
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_nethash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_nethash.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_nethash.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_nethash.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,55 @@
++#ifndef __IP_SET_NETHASH_H
++#define __IP_SET_NETHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "nethash"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_nethash {
++	ip_set_ip_t *members;		/* the nethash proper */
++	uint32_t elements;		/* number of elements */
++	uint32_t hashsize;		/* hash size */
++	uint16_t probes;		/* max number of probes  */
++	uint16_t resize;		/* resize factor in percent */
++	unsigned char cidr[30];		/* CIDR sizes */
++	void *initval[0];		/* initvals for jhash_1word */
++};
++
++struct ip_set_req_nethash_create {
++	uint32_t hashsize;
++	uint16_t probes;
++	uint16_t resize;
++};
++
++struct ip_set_req_nethash {
++	ip_set_ip_t ip;
++	unsigned char cidr;
++};
++
++static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
++
++static inline ip_set_ip_t 
++pack(ip_set_ip_t ip, unsigned char cidr)
++{
++	ip_set_ip_t addr, *paddr = &addr;
++	unsigned char n, t, *a;
++
++	addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
++#ifdef __KERNEL__
++	DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
++#endif
++	n = cidr / 8;
++	t = cidr % 8;	
++	a = &((unsigned char *)paddr)[n];
++	*a = *a /(1 << (8 - t)) + shifts[t];
++#ifdef __KERNEL__
++	DP("n: %u, t: %u, a: %u", n, t, *a);
++	DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
++	   HIPQUAD(ip), cidr, NIPQUAD(addr));
++#endif
++
++	return ntohl(addr);
++}
++
++#endif	/* __IP_SET_NETHASH_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_portmap.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_portmap.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_portmap.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_portmap.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,25 @@
++#ifndef __IP_SET_PORTMAP_H
++#define __IP_SET_PORTMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME	"portmap"
++#define MAX_RANGE	0x0000FFFF
++#define INVALID_PORT	(MAX_RANGE + 1)
++
++struct ip_set_portmap {
++	void *members;			/* the portmap proper */
++	ip_set_ip_t first_port;		/* host byte order, included in range */
++	ip_set_ip_t last_port;		/* host byte order, included in range */
++};
++
++struct ip_set_req_portmap_create {
++	ip_set_ip_t from;
++	ip_set_ip_t to;
++};
++
++struct ip_set_req_portmap {
++	ip_set_ip_t port;
++};
++
++#endif /* __IP_SET_PORTMAP_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ipt_set.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ipt_set.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ipt_set.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ipt_set.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,21 @@
++#ifndef _IPT_SET_H
++#define _IPT_SET_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++struct ipt_set_info {
++	ip_set_id_t index;
++	u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
++};
++
++/* match info */
++struct ipt_set_info_match {
++	struct ipt_set_info match_set;
++};
++
++struct ipt_set_info_target {
++	struct ipt_set_info add_set;
++	struct ipt_set_info del_set;
++};
++
++#endif /*_IPT_SET_H*/
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set.c	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,2001 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ *                         Patrick Schaaf <bof@bof.de>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module for IP set management */
++
++#include <linux/version.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/kmod.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/random.h>
++#include <linux/jhash.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <asm/semaphore.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++
++#define ASSERT_READ_LOCK(x)
++#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter_ipv4/ip_set.h>
++
++static struct list_head set_type_list;		/* all registered sets */
++static struct ip_set **ip_set_list;		/* all individual sets */
++static DEFINE_RWLOCK(ip_set_lock);		/* protects the lists and the hash */
++static DECLARE_MUTEX(ip_set_app_mutex);		/* serializes user access */
++static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
++static ip_set_id_t ip_set_bindings_hash_size =  CONFIG_IP_NF_SET_HASHSIZE;
++static struct list_head *ip_set_hash;		/* hash of bindings */
++static unsigned int ip_set_hash_random;		/* random seed */
++
++/*
++ * Sets are identified either by the index in ip_set_list or by id.
++ * The id never changes and is used to find a key in the hash. 
++ * The index may change by swapping and used at all other places 
++ * (set/SET netfilter modules, binding value, etc.)
++ *
++ * Userspace requests are serialized by ip_set_mutex and sets can
++ * be deleted only from userspace. Therefore ip_set_list locking 
++ * must obey the following rules:
++ *
++ * - kernel requests: read and write locking mandatory
++ * - user requests: read locking optional, write locking mandatory
++ */
++
++static inline void
++__ip_set_get(ip_set_id_t index)
++{
++	atomic_inc(&ip_set_list[index]->ref);
++}
++
++static inline void
++__ip_set_put(ip_set_id_t index)
++{
++	atomic_dec(&ip_set_list[index]->ref);
++}
++
++/*
++ * Binding routines
++ */
++
++static inline struct ip_set_hash *
++__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
++{
++	struct ip_set_hash *set_hash;
++
++	list_for_each_entry(set_hash, &ip_set_hash[key], list)
++		if (set_hash->id == id && set_hash->ip == ip)
++			return set_hash;
++			
++	return NULL;
++}
++
++static ip_set_id_t
++ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
++{
++	u_int32_t key = jhash_2words(id, ip, ip_set_hash_random) 
++				% ip_set_bindings_hash_size;
++	struct ip_set_hash *set_hash;
++
++	ASSERT_READ_LOCK(&ip_set_lock);
++	IP_SET_ASSERT(ip_set_list[id]);
++	DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));	
++	
++	set_hash = __ip_set_find(key, id, ip);
++	
++	DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name, 
++	   HIPQUAD(ip),
++	   set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
++
++	return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
++}
++
++static inline void 
++__set_hash_del(struct ip_set_hash *set_hash)
++{
++	ASSERT_WRITE_LOCK(&ip_set_lock);
++	IP_SET_ASSERT(ip_set_list[set_hash->binding]);	
++
++	__ip_set_put(set_hash->binding);
++	list_del(&set_hash->list);
++	kfree(set_hash);
++}
++
++static int
++ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
++{
++	u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++				% ip_set_bindings_hash_size;
++	struct ip_set_hash *set_hash;
++	
++	IP_SET_ASSERT(ip_set_list[id]);
++	DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));	
++	write_lock_bh(&ip_set_lock);
++	set_hash = __ip_set_find(key, id, ip);
++	DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++	   HIPQUAD(ip),
++	   set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
++
++	if (set_hash != NULL)
++		__set_hash_del(set_hash);
++	write_unlock_bh(&ip_set_lock);
++	return 0;
++}
++
++static int 
++ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
++{
++	u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++				% ip_set_bindings_hash_size;
++	struct ip_set_hash *set_hash;
++	int ret = 0;
++	
++	IP_SET_ASSERT(ip_set_list[id]);
++	IP_SET_ASSERT(ip_set_list[binding]);
++	DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name, 
++	   HIPQUAD(ip), ip_set_list[binding]->name);
++	write_lock_bh(&ip_set_lock);
++	set_hash = __ip_set_find(key, id, ip);
++	if (!set_hash) {
++		set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
++		if (!set_hash) {
++			ret = -ENOMEM;
++			goto unlock;
++		}
++		INIT_LIST_HEAD(&set_hash->list);
++		set_hash->id = id;
++		set_hash->ip = ip;
++		list_add(&set_hash->list, &ip_set_hash[key]);
++	} else {
++		IP_SET_ASSERT(ip_set_list[set_hash->binding]);	
++		DP("overwrite binding: %s",
++		   ip_set_list[set_hash->binding]->name);
++		__ip_set_put(set_hash->binding);
++	}
++	set_hash->binding = binding;
++	__ip_set_get(set_hash->binding);
++	DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
++	   key, id, ip_set_list[id]->name,
++	   HIPQUAD(ip), binding, ip_set_list[binding]->name);
++    unlock:
++	write_unlock_bh(&ip_set_lock);
++	return ret;
++}
++
++#define FOREACH_HASH_DO(fn, args...) 						\
++({										\
++	ip_set_id_t __key;							\
++	struct ip_set_hash *__set_hash;						\
++										\
++	for (__key = 0; __key < ip_set_bindings_hash_size; __key++) {		\
++		list_for_each_entry(__set_hash, &ip_set_hash[__key], list)	\
++			fn(__set_hash , ## args);				\
++	}									\
++})
++
++#define FOREACH_HASH_RW_DO(fn, args...) 						\
++({										\
++	ip_set_id_t __key;							\
++	struct ip_set_hash *__set_hash, *__n;					\
++										\
++	ASSERT_WRITE_LOCK(&ip_set_lock);					\
++	for (__key = 0; __key < ip_set_bindings_hash_size; __key++) {		\
++		list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
++			fn(__set_hash , ## args);				\
++	}									\
++})
++
++/* Add, del and test set entries from kernel */
++
++#define follow_bindings(index, set, ip)					\
++((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID	\
++ || (index = (set)->binding) != IP_SET_INVALID_ID)
++
++int
++ip_set_testip_kernel(ip_set_id_t index,
++		     const struct sk_buff *skb,
++		     const u_int32_t *flags)
++{
++	struct ip_set *set;
++	ip_set_ip_t ip;
++	int res;
++	unsigned char i = 0;
++	
++	IP_SET_ASSERT(flags[i]);
++	read_lock_bh(&ip_set_lock);
++	do {
++		set = ip_set_list[index];
++		IP_SET_ASSERT(set);
++		DP("set %s, index %u", set->name, index);
++		read_lock_bh(&set->lock);
++		res = set->type->testip_kernel(set, skb, &ip, flags, i++);
++		read_unlock_bh(&set->lock);
++		i += !!(set->type->features & IPSET_DATA_DOUBLE);
++	} while (res > 0 
++		 && flags[i] 
++		 && follow_bindings(index, set, ip));
++	read_unlock_bh(&ip_set_lock);
++
++	return res;
++}
++
++void
++ip_set_addip_kernel(ip_set_id_t index,
++		    const struct sk_buff *skb,
++		    const u_int32_t *flags)
++{
++	struct ip_set *set;
++	ip_set_ip_t ip;
++	int res;
++	unsigned char i = 0;
++
++	IP_SET_ASSERT(flags[i]);
++   retry:
++	read_lock_bh(&ip_set_lock);
++	do {
++		set = ip_set_list[index];
++		IP_SET_ASSERT(set);
++		DP("set %s, index %u", set->name, index);
++		write_lock_bh(&set->lock);
++		res = set->type->addip_kernel(set, skb, &ip, flags, i++);
++		write_unlock_bh(&set->lock);
++		i += !!(set->type->features & IPSET_DATA_DOUBLE);
++	} while ((res == 0 || res == -EEXIST)
++		 && flags[i] 
++		 && follow_bindings(index, set, ip));
++	read_unlock_bh(&ip_set_lock);
++
++	if (res == -EAGAIN
++	    && set->type->retry
++	    && (res = set->type->retry(set)) == 0)
++	    	goto retry;
++}
++
++void
++ip_set_delip_kernel(ip_set_id_t index,
++		    const struct sk_buff *skb,
++		    const u_int32_t *flags)
++{
++	struct ip_set *set;
++	ip_set_ip_t ip;
++	int res;
++	unsigned char i = 0;
++
++	IP_SET_ASSERT(flags[i]);
++	read_lock_bh(&ip_set_lock);
++	do {
++		set = ip_set_list[index];
++		IP_SET_ASSERT(set);
++		DP("set %s, index %u", set->name, index);
++		write_lock_bh(&set->lock);
++		res = set->type->delip_kernel(set, skb, &ip, flags, i++);
++		write_unlock_bh(&set->lock);
++		i += !!(set->type->features & IPSET_DATA_DOUBLE);
++	} while ((res == 0 || res == -EEXIST)
++		 && flags[i] 
++		 && follow_bindings(index, set, ip));
++	read_unlock_bh(&ip_set_lock);
++}
++
++/* Register and deregister settype */
++
++static inline struct ip_set_type *
++find_set_type(const char *name)
++{
++	struct ip_set_type *set_type;
++
++	list_for_each_entry(set_type, &set_type_list, list)
++		if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
++			return set_type;
++	return NULL;
++}
++
++int 
++ip_set_register_set_type(struct ip_set_type *set_type)
++{
++	int ret = 0;
++	
++	if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
++		ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
++			      set_type->typename,
++			      set_type->protocol_version,
++			      IP_SET_PROTOCOL_VERSION);
++		return -EINVAL;
++	}
++
++	write_lock_bh(&ip_set_lock);
++	if (find_set_type(set_type->typename)) {
++		/* Duplicate! */
++		ip_set_printk("'%s' already registered!", 
++			      set_type->typename);
++		ret = -EINVAL;
++		goto unlock;
++	}
++	if (!try_module_get(THIS_MODULE)) {
++		ret = -EFAULT;
++		goto unlock;
++	}
++	list_add(&set_type->list, &set_type_list);
++	DP("'%s' registered.", set_type->typename);
++   unlock:
++	write_unlock_bh(&ip_set_lock);
++	return ret;
++}
++
++void
++ip_set_unregister_set_type(struct ip_set_type *set_type)
++{
++	write_lock_bh(&ip_set_lock);
++	if (!find_set_type(set_type->typename)) {
++		ip_set_printk("'%s' not registered?",
++			      set_type->typename);
++		goto unlock;
++	}
++	list_del(&set_type->list);
++	module_put(THIS_MODULE);
++	DP("'%s' unregistered.", set_type->typename);
++   unlock:
++	write_unlock_bh(&ip_set_lock);
++
++}
++
++/*
++ * Userspace routines
++ */
++
++/*
++ * Find set by name, reference it once. The reference makes sure the
++ * thing pointed to, does not go away under our feet. Drop the reference
++ * later, using ip_set_put().
++ */
++ip_set_id_t
++ip_set_get_byname(const char *name)
++{
++	ip_set_id_t i, index = IP_SET_INVALID_ID;
++	
++	down(&ip_set_app_mutex);
++	for (i = 0; i < ip_set_max; i++) {
++		if (ip_set_list[i] != NULL
++		    && strcmp(ip_set_list[i]->name, name) == 0) {
++			__ip_set_get(i);
++			index = i;
++			break;
++		}
++	}
++	up(&ip_set_app_mutex);
++	return index;
++}
++
++/*
++ * Find set by index, reference it once. The reference makes sure the
++ * thing pointed to, does not go away under our feet. Drop the reference
++ * later, using ip_set_put().
++ */
++ip_set_id_t
++ip_set_get_byindex(ip_set_id_t index)
++{
++	down(&ip_set_app_mutex);
++
++	if (index >= ip_set_max)
++		return IP_SET_INVALID_ID;
++	
++	if (ip_set_list[index])
++		__ip_set_get(index);
++	else
++		index = IP_SET_INVALID_ID;
++		
++	up(&ip_set_app_mutex);
++	return index;
++}
++
++/*
++ * If the given set pointer points to a valid set, decrement
++ * reference count by 1. The caller shall not assume the index
++ * to be valid, after calling this function.
++ */
++void ip_set_put(ip_set_id_t index)
++{
++	down(&ip_set_app_mutex);
++	if (ip_set_list[index])
++		__ip_set_put(index);
++	up(&ip_set_app_mutex);
++}
++
++/* Find a set by name or index */
++static ip_set_id_t
++ip_set_find_byname(const char *name)
++{
++	ip_set_id_t i, index = IP_SET_INVALID_ID;
++	
++	for (i = 0; i < ip_set_max; i++) {
++		if (ip_set_list[i] != NULL
++		    && strcmp(ip_set_list[i]->name, name) == 0) {
++			index = i;
++			break;
++		}
++	}
++	return index;
++}
++
++static ip_set_id_t
++ip_set_find_byindex(ip_set_id_t index)
++{
++	if (index >= ip_set_max || ip_set_list[index] == NULL)
++		index = IP_SET_INVALID_ID;
++	
++	return index;
++}
++
++/*
++ * Add, del, test, bind and unbind
++ */
++
++static inline int
++__ip_set_testip(struct ip_set *set,
++	        const void *data,
++	        size_t size,
++	        ip_set_ip_t *ip)
++{
++	int res;
++
++	read_lock_bh(&set->lock);
++	res = set->type->testip(set, data, size, ip);
++	read_unlock_bh(&set->lock);
++
++	return res;
++}
++
++static int
++__ip_set_addip(ip_set_id_t index,
++	       const void *data,
++	       size_t size)
++{
++	struct ip_set *set = ip_set_list[index];
++	ip_set_ip_t ip;
++	int res;
++	
++	IP_SET_ASSERT(set);
++	do {
++		write_lock_bh(&set->lock);
++		res = set->type->addip(set, data, size, &ip);
++		write_unlock_bh(&set->lock);
++	} while (res == -EAGAIN
++		 && set->type->retry
++		 && (res = set->type->retry(set)) == 0);
++
++	return res;
++}
++
++static int
++ip_set_addip(ip_set_id_t index,
++	     const void *data,
++	     size_t size)
++{
++
++	return __ip_set_addip(index,
++			      data + sizeof(struct ip_set_req_adt),
++			      size - sizeof(struct ip_set_req_adt));
++}
++
++static int
++ip_set_delip(ip_set_id_t index,
++	     const void *data,
++	     size_t size)
++{
++	struct ip_set *set = ip_set_list[index];
++	ip_set_ip_t ip;
++	int res;
++	
++	IP_SET_ASSERT(set);
++	write_lock_bh(&set->lock);
++	res = set->type->delip(set,
++			       data + sizeof(struct ip_set_req_adt),
++			       size - sizeof(struct ip_set_req_adt),
++			       &ip);
++	write_unlock_bh(&set->lock);
++
++	return res;
++}
++
++static int
++ip_set_testip(ip_set_id_t index,
++	      const void *data,
++	      size_t size)
++{
++	struct ip_set *set = ip_set_list[index];
++	ip_set_ip_t ip;
++	int res;
++
++	IP_SET_ASSERT(set);
++	res = __ip_set_testip(set,
++			      data + sizeof(struct ip_set_req_adt),
++			      size - sizeof(struct ip_set_req_adt),
++			      &ip);
++
++	return (res > 0 ? -EEXIST : res);
++}
++
++static int
++ip_set_bindip(ip_set_id_t index,
++	      const void *data,
++	      size_t size)
++{
++	struct ip_set *set = ip_set_list[index];
++	struct ip_set_req_bind *req_bind;
++	ip_set_id_t binding;
++	ip_set_ip_t ip;
++	int res;
++
++	IP_SET_ASSERT(set);
++	if (size < sizeof(struct ip_set_req_bind))
++		return -EINVAL;
++		
++	req_bind = (struct ip_set_req_bind *) data;
++	req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++
++	if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++		/* Default binding of a set */
++		char *binding_name;
++		
++		if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
++			return -EINVAL;
++
++		binding_name = (char *)(data + sizeof(struct ip_set_req_bind));	
++		binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++
++		binding = ip_set_find_byname(binding_name);
++		if (binding == IP_SET_INVALID_ID)
++			return -ENOENT;
++
++		write_lock_bh(&ip_set_lock);
++		/* Sets as binding values are referenced */
++		if (set->binding != IP_SET_INVALID_ID)
++			__ip_set_put(set->binding);
++		set->binding = binding;
++		__ip_set_get(set->binding);
++		write_unlock_bh(&ip_set_lock);
++
++		return 0;
++	}
++	binding = ip_set_find_byname(req_bind->binding);
++	if (binding == IP_SET_INVALID_ID)
++		return -ENOENT;
++
++	res = __ip_set_testip(set,
++			      data + sizeof(struct ip_set_req_bind),
++			      size - sizeof(struct ip_set_req_bind),
++			      &ip);
++	DP("set %s, ip: %u.%u.%u.%u, binding %s",
++	   set->name, HIPQUAD(ip), ip_set_list[binding]->name);
++	
++	if (res >= 0)
++		res = ip_set_hash_add(set->id, ip, binding);
++
++	return res;
++}
++
++#define FOREACH_SET_DO(fn, args...) 				\
++({								\
++	ip_set_id_t __i;					\
++	struct ip_set *__set;					\
++								\
++	for (__i = 0; __i < ip_set_max; __i++) {		\
++		__set = ip_set_list[__i];			\
++		if (__set != NULL)				\
++			fn(__set , ##args);			\
++	}							\
++})
++
++static inline void
++__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
++{
++	if (set_hash->id == id)
++		__set_hash_del(set_hash);
++}
++
++static inline void
++__unbind_default(struct ip_set *set)
++{
++	if (set->binding != IP_SET_INVALID_ID) {
++		/* Sets as binding values are referenced */
++		__ip_set_put(set->binding);
++		set->binding = IP_SET_INVALID_ID;
++	}
++}
++
++static int
++ip_set_unbindip(ip_set_id_t index,
++	        const void *data,
++	        size_t size)
++{
++	struct ip_set *set;
++	struct ip_set_req_bind *req_bind;
++	ip_set_ip_t ip;
++	int res;
++
++	DP("");
++	if (size < sizeof(struct ip_set_req_bind))
++		return -EINVAL;
++		
++	req_bind = (struct ip_set_req_bind *) data;
++	req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++	
++	DP("%u %s", index, req_bind->binding);
++	if (index == IP_SET_INVALID_ID) {
++		/* unbind :all: */
++		if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++			/* Default binding of sets */
++			write_lock_bh(&ip_set_lock);
++			FOREACH_SET_DO(__unbind_default);
++			write_unlock_bh(&ip_set_lock);
++			return 0;
++		} else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++			/* Flush all bindings of all sets*/
++			write_lock_bh(&ip_set_lock);
++			FOREACH_HASH_RW_DO(__set_hash_del);
++			write_unlock_bh(&ip_set_lock);
++			return 0;
++		}
++		DP("unreachable reached!");
++		return -EINVAL;
++	}
++	
++	set = ip_set_list[index];
++	IP_SET_ASSERT(set);
++	if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++		/* Default binding of set */
++		ip_set_id_t binding = ip_set_find_byindex(set->binding);
++
++		if (binding == IP_SET_INVALID_ID)
++			return -ENOENT;
++			
++		write_lock_bh(&ip_set_lock);
++		/* Sets as binding values are referenced */
++		__ip_set_put(set->binding);
++		set->binding = IP_SET_INVALID_ID;
++		write_unlock_bh(&ip_set_lock);
++
++		return 0;
++	} else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++		/* Flush all bindings */
++
++		write_lock_bh(&ip_set_lock);
++		FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
++		write_unlock_bh(&ip_set_lock);
++		return 0;
++	}
++	
++	res = __ip_set_testip(set,
++			      data + sizeof(struct ip_set_req_bind),
++			      size - sizeof(struct ip_set_req_bind),
++			      &ip);
++
++	DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
++	if (res >= 0)
++		res = ip_set_hash_del(set->id, ip);
++
++	return res;
++}
++
++static int
++ip_set_testbind(ip_set_id_t index,
++	        const void *data,
++	        size_t size)
++{
++	struct ip_set *set = ip_set_list[index];
++	struct ip_set_req_bind *req_bind;
++	ip_set_id_t binding;
++	ip_set_ip_t ip;
++	int res;
++
++	IP_SET_ASSERT(set);
++	if (size < sizeof(struct ip_set_req_bind))
++		return -EINVAL;
++		
++	req_bind = (struct ip_set_req_bind *) data;
++	req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++
++	if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++		/* Default binding of set */
++		char *binding_name;
++		
++		if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
++			return -EINVAL;
++
++		binding_name = (char *)(data + sizeof(struct ip_set_req_bind));	
++		binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++
++		binding = ip_set_find_byname(binding_name);
++		if (binding == IP_SET_INVALID_ID)
++			return -ENOENT;
++		
++		res = (set->binding == binding) ? -EEXIST : 0;
++
++		return res;
++	}
++	binding = ip_set_find_byname(req_bind->binding);
++	if (binding == IP_SET_INVALID_ID)
++		return -ENOENT;
++		
++	
++	res = __ip_set_testip(set,
++			      data + sizeof(struct ip_set_req_bind),
++			      size - sizeof(struct ip_set_req_bind),
++			      &ip);
++	DP("set %s, ip: %u.%u.%u.%u, binding %s",
++	   set->name, HIPQUAD(ip), ip_set_list[binding]->name);
++	   
++	if (res >= 0)
++		res = (ip_set_find_in_hash(set->id, ip) == binding)
++			? -EEXIST : 0;
++
++	return res;
++}
++
++static struct ip_set_type *
++find_set_type_rlock(const char *typename)
++{
++	struct ip_set_type *type;
++	
++	read_lock_bh(&ip_set_lock);
++	type = find_set_type(typename);
++	if (type == NULL)
++		read_unlock_bh(&ip_set_lock);
++
++	return type;
++}
++
++static int
++find_free_id(const char *name,
++	     ip_set_id_t *index,
++	     ip_set_id_t *id)
++{
++	ip_set_id_t i;
++
++	*id = IP_SET_INVALID_ID;
++	for (i = 0;  i < ip_set_max; i++) {
++		if (ip_set_list[i] == NULL) {
++			if (*id == IP_SET_INVALID_ID)
++				*id = *index = i;
++		} else if (strcmp(name, ip_set_list[i]->name) == 0)
++			/* Name clash */
++			return -EEXIST;
++	}
++	if (*id == IP_SET_INVALID_ID)
++		/* No free slot remained */
++		return -ERANGE;
++	/* Check that index is usable as id (swapping) */
++    check:	
++	for (i = 0;  i < ip_set_max; i++) {
++		if (ip_set_list[i] != NULL
++		    && ip_set_list[i]->id == *id) {
++		    *id = i;
++		    goto check;
++		}
++	}
++	return 0;
++}
++
++/*
++ * Create a set
++ */
++static int
++ip_set_create(const char *name,
++	      const char *typename,
++	      ip_set_id_t restore,
++	      const void *data,
++	      size_t size)
++{
++	struct ip_set *set;
++	ip_set_id_t index = 0, id;
++	int res = 0;
++
++	DP("setname: %s, typename: %s, id: %u", name, typename, restore);
++	/*
++	 * First, and without any locks, allocate and initialize
++	 * a normal base set structure.
++	 */
++	set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
++	if (!set)
++		return -ENOMEM;
++	set->lock = RW_LOCK_UNLOCKED;
++	strncpy(set->name, name, IP_SET_MAXNAMELEN);
++	set->binding = IP_SET_INVALID_ID;
++	atomic_set(&set->ref, 0);
++
++	/*
++	 * Next, take the &ip_set_lock, check that we know the type,
++	 * and take a reference on the type, to make sure it
++	 * stays available while constructing our new set.
++	 *
++	 * After referencing the type, we drop the &ip_set_lock,
++	 * and let the new set construction run without locks.
++	 */
++	set->type = find_set_type_rlock(typename);
++	if (set->type == NULL) {
++		/* Try loading the module */
++		char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
++		strcpy(modulename, "ip_set_");
++		strcat(modulename, typename);
++		DP("try to load %s", modulename);
++		request_module(modulename);
++		set->type = find_set_type_rlock(typename);
++	}
++	if (set->type == NULL) {
++		ip_set_printk("no set type '%s', set '%s' not created",
++			      typename, name);
++		res = -ENOENT;
++		goto out;
++	}
++	if (!try_module_get(set->type->me)) {
++		read_unlock_bh(&ip_set_lock);
++		res = -EFAULT;
++		goto out;
++	}
++	read_unlock_bh(&ip_set_lock);
++
++	/*
++	 * Without holding any locks, create private part.
++	 */
++	res = set->type->create(set, data, size);
++	if (res != 0)
++		goto put_out;
++
++	/* BTW, res==0 here. */
++
++	/*
++	 * Here, we have a valid, constructed set. &ip_set_lock again,
++	 * find free id/index and check that it is not already in 
++	 * ip_set_list.
++	 */
++	write_lock_bh(&ip_set_lock);
++	if ((res = find_free_id(set->name, &index, &id)) != 0) {
++		DP("no free id!");
++		goto cleanup;
++	}
++
++	/* Make sure restore gets the same index */
++	if (restore != IP_SET_INVALID_ID && index != restore) {
++		DP("Can't restore, sets are screwed up");
++		res = -ERANGE;
++		goto cleanup;
++	}
++	 
++	/*
++	 * Finally! Add our shiny new set to the list, and be done.
++	 */
++	DP("create: '%s' created with index %u, id %u!", set->name, index, id);
++	set->id = id;
++	ip_set_list[index] = set;
++	write_unlock_bh(&ip_set_lock);
++	return res;
++	
++    cleanup:
++	write_unlock_bh(&ip_set_lock);
++	set->type->destroy(set);
++    put_out:
++	module_put(set->type->me);
++    out:
++	kfree(set);
++	return res;
++}
++
++/*
++ * Destroy a given existing set
++ */
++static void
++ip_set_destroy_set(ip_set_id_t index)
++{
++	struct ip_set *set = ip_set_list[index];
++
++	IP_SET_ASSERT(set);
++	DP("set: %s",  set->name);
++	write_lock_bh(&ip_set_lock);
++	FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
++	if (set->binding != IP_SET_INVALID_ID)
++		__ip_set_put(set->binding);
++	ip_set_list[index] = NULL;
++	write_unlock_bh(&ip_set_lock);
++
++	/* Must call it without holding any lock */
++	set->type->destroy(set);
++	module_put(set->type->me);
++	kfree(set);
++}
++
++/*
++ * Destroy a set - or all sets
++ * Sets must not be referenced/used.
++ */
++static int
++ip_set_destroy(ip_set_id_t index)
++{
++	ip_set_id_t i;
++
++	/* ref modification always protected by the mutex */
++	if (index != IP_SET_INVALID_ID) {
++		if (atomic_read(&ip_set_list[index]->ref))
++			return -EBUSY;
++		ip_set_destroy_set(index);
++	} else {
++		for (i = 0; i < ip_set_max; i++) {
++			if (ip_set_list[i] != NULL 
++			    && (atomic_read(&ip_set_list[i]->ref)))
++			    	return -EBUSY;
++		}
++
++		for (i = 0; i < ip_set_max; i++) {
++			if (ip_set_list[i] != NULL)
++				ip_set_destroy_set(i);
++		}
++	}
++	return 0;
++}
++
++static void
++ip_set_flush_set(struct ip_set *set)
++{
++	DP("set: %s %u",  set->name, set->id);
++
++	write_lock_bh(&set->lock);
++	set->type->flush(set);
++	write_unlock_bh(&set->lock);
++}
++
++/* 
++ * Flush data in a set - or in all sets
++ */
++static int
++ip_set_flush(ip_set_id_t index)
++{
++	if (index != IP_SET_INVALID_ID) {
++		IP_SET_ASSERT(ip_set_list[index]);
++		ip_set_flush_set(ip_set_list[index]);
++	} else
++		FOREACH_SET_DO(ip_set_flush_set);
++
++	return 0;
++}
++
++/* Rename a set */
++static int
++ip_set_rename(ip_set_id_t index, const char *name)
++{
++	struct ip_set *set = ip_set_list[index];
++	ip_set_id_t i;
++	int res = 0;
++
++	DP("set: %s to %s",  set->name, name);
++	write_lock_bh(&ip_set_lock);
++	for (i = 0; i < ip_set_max; i++) {
++		if (ip_set_list[i] != NULL
++		    && strncmp(ip_set_list[i]->name, 
++			       name,
++			       IP_SET_MAXNAMELEN - 1) == 0) {
++			res = -EEXIST;
++			goto unlock;
++		}
++	}
++	strncpy(set->name, name, IP_SET_MAXNAMELEN);
++    unlock:
++	write_unlock_bh(&ip_set_lock);
++	return res;
++}
++
++/*
++ * Swap two sets so that name/index points to the other.
++ * References are also swapped.
++ */
++static int
++ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
++{
++	struct ip_set *from = ip_set_list[from_index];
++	struct ip_set *to = ip_set_list[to_index];
++	char from_name[IP_SET_MAXNAMELEN];
++	u_int32_t from_ref;
++
++	DP("set: %s to %s",  from->name, to->name);
++	/* Features must not change. Artificial restriction. */
++	if (from->type->features != to->type->features)
++		return -ENOEXEC;
++
++	/* No magic here: ref munging protected by the mutex */	
++	write_lock_bh(&ip_set_lock);
++	strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
++	from_ref = atomic_read(&from->ref);
++
++	strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
++	atomic_set(&from->ref, atomic_read(&to->ref));
++	strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
++	atomic_set(&to->ref, from_ref);
++	
++	ip_set_list[from_index] = to;
++	ip_set_list[to_index] = from;
++	
++	write_unlock_bh(&ip_set_lock);
++	return 0;
++}
++
++/*
++ * List set data
++ */
++
++static inline void
++__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
++			      ip_set_id_t id, size_t *size)
++{
++	if (set_hash->id == id)
++		*size += sizeof(struct ip_set_hash_list);
++}
++
++static inline void
++__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
++			      ip_set_id_t id, size_t *size)
++{
++	if (set_hash->id == id)
++		*size += sizeof(struct ip_set_hash_save);
++}
++
++static inline void
++__set_hash_bindings(struct ip_set_hash *set_hash,
++		    ip_set_id_t id, void *data, int *used)
++{
++	if (set_hash->id == id) {
++		struct ip_set_hash_list *hash_list = 
++			(struct ip_set_hash_list *)(data + *used);
++
++		hash_list->ip = set_hash->ip;
++		hash_list->binding = set_hash->binding;
++		*used += sizeof(struct ip_set_hash_list);
++	}
++}
++
++static int ip_set_list_set(ip_set_id_t index,
++			   void *data,
++			   int *used,
++			   int len)
++{
++	struct ip_set *set = ip_set_list[index];
++	struct ip_set_list *set_list;
++
++	/* Pointer to our header */
++	set_list = (struct ip_set_list *) (data + *used);
++
++	DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
++
++	/* Get and ensure header size */
++	if (*used + sizeof(struct ip_set_list) > len)
++		goto not_enough_mem;
++	*used += sizeof(struct ip_set_list);
++
++	read_lock_bh(&set->lock);
++	/* Get and ensure set specific header size */
++	set_list->header_size = set->type->header_size;
++	if (*used + set_list->header_size > len)
++		goto unlock_set;
++
++	/* Fill in the header */
++	set_list->index = index;
++	set_list->binding = set->binding;
++	set_list->ref = atomic_read(&set->ref);
++
++	/* Fill in set specific header data */
++	set->type->list_header(set, data + *used);
++	*used += set_list->header_size;
++
++	/* Get and ensure set specific members size */
++	set_list->members_size = set->type->list_members_size(set);
++	if (*used + set_list->members_size > len)
++		goto unlock_set;
++
++	/* Fill in set specific members data */
++	set->type->list_members(set, data + *used);
++	*used += set_list->members_size;
++	read_unlock_bh(&set->lock);
++
++	/* Bindings */
++
++	/* Get and ensure set specific bindings size */
++	set_list->bindings_size = 0;
++	FOREACH_HASH_DO(__set_hash_bindings_size_list,
++			set->id, &set_list->bindings_size);
++	if (*used + set_list->bindings_size > len)
++		goto not_enough_mem;
++
++	/* Fill in set specific bindings data */
++	FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
++	
++	return 0;
++
++    unlock_set:
++	read_unlock_bh(&set->lock);
++    not_enough_mem:
++	DP("not enough mem, try again");
++	return -EAGAIN;
++}
++
++/*
++ * Save sets
++ */
++static int ip_set_save_set(ip_set_id_t index,
++			   void *data,
++			   int *used,
++			   int len)
++{
++	struct ip_set *set;
++	struct ip_set_save *set_save;
++
++	/* Pointer to our header */
++	set_save = (struct ip_set_save *) (data + *used);
++
++	/* Get and ensure header size */
++	if (*used + sizeof(struct ip_set_save) > len)
++		goto not_enough_mem;
++	*used += sizeof(struct ip_set_save);
++
++	set = ip_set_list[index];
++	DP("set: %s, used: %u(%u) %p %p", set->name, *used, len, 
++	   data, data + *used);
++
++	read_lock_bh(&set->lock);
++	/* Get and ensure set specific header size */
++	set_save->header_size = set->type->header_size;
++	if (*used + set_save->header_size > len)
++		goto unlock_set;
++
++	/* Fill in the header */
++	set_save->index = index;
++	set_save->binding = set->binding;
++
++	/* Fill in set specific header data */
++	set->type->list_header(set, data + *used);
++	*used += set_save->header_size;
++
++	DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
++	   set_save->header_size, data, data + *used);
++	/* Get and ensure set specific members size */
++	set_save->members_size = set->type->list_members_size(set);
++	if (*used + set_save->members_size > len)
++		goto unlock_set;
++
++	/* Fill in set specific members data */
++	set->type->list_members(set, data + *used);
++	*used += set_save->members_size;
++	read_unlock_bh(&set->lock);
++	DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
++	   set_save->members_size, data, data + *used);
++	return 0;
++
++    unlock_set:
++	read_unlock_bh(&set->lock);
++    not_enough_mem:
++	DP("not enough mem, try again");
++	return -EAGAIN;
++}
++
++static inline void
++__set_hash_save_bindings(struct ip_set_hash *set_hash,
++			 ip_set_id_t id,
++			 void *data,
++			 int *used,
++			 int len,
++			 int *res)
++{
++	if (*res == 0
++	    && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
++		struct ip_set_hash_save *hash_save = 
++			(struct ip_set_hash_save *)(data + *used);
++		/* Ensure bindings size */
++		if (*used + sizeof(struct ip_set_hash_save) > len) {
++			*res = -ENOMEM;
++			return;
++		}
++		hash_save->id = set_hash->id;
++		hash_save->ip = set_hash->ip;
++		hash_save->binding = set_hash->binding;
++		*used += sizeof(struct ip_set_hash_save);
++	}
++}
++
++static int ip_set_save_bindings(ip_set_id_t index,
++			   	void *data,
++			   	int *used,
++			   	int len)
++{
++	int res = 0;
++	struct ip_set_save *set_save;
++
++	DP("used %u, len %u", *used, len);
++	/* Get and ensure header size */
++	if (*used + sizeof(struct ip_set_save) > len)
++		return -ENOMEM;
++
++	/* Marker */
++	set_save = (struct ip_set_save *) (data + *used);
++	set_save->index = IP_SET_INVALID_ID;
++	set_save->header_size = 0;
++	set_save->members_size = 0;
++	*used += sizeof(struct ip_set_save);
++
++	DP("marker added used %u, len %u", *used, len);
++	/* Fill in bindings data */
++	if (index != IP_SET_INVALID_ID)
++		/* Sets are identified by id in hash */
++		index = ip_set_list[index]->id;
++	FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
++
++	return res;	
++}
++
++/*
++ * Restore sets
++ */
++static int ip_set_restore(void *data,
++			  int len)
++{
++	int res = 0;
++	int line = 0, used = 0, members_size;
++	struct ip_set *set;
++	struct ip_set_hash_save *hash_save;
++	struct ip_set_restore *set_restore;
++	ip_set_id_t index;
++
++	/* Loop to restore sets */
++	while (1) {
++		line++;
++		
++		DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
++		/* Get and ensure header size */
++		if (used + sizeof(struct ip_set_restore) > len)
++			return line;
++		set_restore = (struct ip_set_restore *) (data + used);
++		used += sizeof(struct ip_set_restore);
++
++		/* Ensure data size */
++		if (used 
++		    + set_restore->header_size 
++		    + set_restore->members_size > len)
++			return line;
++
++		/* Check marker */
++		if (set_restore->index == IP_SET_INVALID_ID) {
++			line--;
++			goto bindings;
++		}
++		
++		/* Try to create the set */
++		DP("restore %s %s", set_restore->name, set_restore->typename);
++		res = ip_set_create(set_restore->name,
++				    set_restore->typename,
++				    set_restore->index,
++				    data + used,
++				    set_restore->header_size);
++		
++		if (res != 0)
++			return line;
++		used += set_restore->header_size;
++
++		index = ip_set_find_byindex(set_restore->index);
++		DP("index %u, restore_index %u", index, set_restore->index);
++		if (index != set_restore->index)
++			return line;
++		/* Try to restore members data */
++		set = ip_set_list[index];
++		members_size = 0;
++		DP("members_size %u reqsize %u",
++		   set_restore->members_size, set->type->reqsize);
++		while (members_size + set->type->reqsize <=
++		       set_restore->members_size) {
++			line++;
++		       	DP("members: %u, line %u", members_size, line);
++			res = __ip_set_addip(index,
++					   data + used + members_size,
++					   set->type->reqsize);
++			if (!(res == 0 || res == -EEXIST)) 
++				return line;
++			members_size += set->type->reqsize;
++		}
++
++		DP("members_size %u  %u",
++		   set_restore->members_size, members_size);
++		if (members_size != set_restore->members_size)
++			return line++;
++		used += set_restore->members_size;		
++	}
++	
++   bindings:
++   	/* Loop to restore bindings */
++   	while (used < len) {
++		line++;
++
++		DP("restore binding, line %u", line);		
++		/* Get and ensure size */
++		if (used + sizeof(struct ip_set_hash_save) > len)
++			return line;
++		hash_save = (struct ip_set_hash_save *) (data + used);
++		used += sizeof(struct ip_set_hash_save);
++		
++		/* hash_save->id is used to store the index */
++		index = ip_set_find_byindex(hash_save->id);
++		DP("restore binding index %u, id %u, %u -> %u",
++		   index, hash_save->id, hash_save->ip, hash_save->binding);		
++		if (index != hash_save->id)
++			return line;
++		if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
++			DP("corrupt binding set index %u", hash_save->binding);
++			return line;
++		}
++		set = ip_set_list[hash_save->id];
++		/* Null valued IP means default binding */
++		if (hash_save->ip)
++			res = ip_set_hash_add(set->id, 
++					      hash_save->ip,
++					      hash_save->binding);
++		else {
++			IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
++			write_lock_bh(&ip_set_lock);
++			set->binding = hash_save->binding;
++			__ip_set_get(set->binding);
++			write_unlock_bh(&ip_set_lock);
++			DP("default binding: %u", set->binding);
++		}
++		if (res != 0)
++			return line;
++   	}
++   	if (used != len)
++   		return line;
++   	
++	return 0;	
++}
++
++static int
++ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
++{
++	void *data;
++	int res = 0;		/* Assume OK */
++	unsigned *op;
++	struct ip_set_req_adt *req_adt;
++	ip_set_id_t index = IP_SET_INVALID_ID;
++	int (*adtfn)(ip_set_id_t index,
++		     const void *data, size_t size);
++	struct fn_table {
++		int (*fn)(ip_set_id_t index,
++			  const void *data, size_t size);
++	} adtfn_table[] =
++	{ { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
++	  { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
++	};
++
++	DP("optval=%d, user=%p, len=%d", optval, user, len);
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
++	if (optval != SO_IP_SET)
++		return -EBADF;
++	if (len <= sizeof(unsigned)) {
++		ip_set_printk("short userdata (want >%zu, got %u)",
++			      sizeof(unsigned), len);
++		return -EINVAL;
++	}
++	data = vmalloc(len);
++	if (!data) {
++		DP("out of mem for %u bytes", len);
++		return -ENOMEM;
++	}
++	if (copy_from_user(data, user, len) != 0) {
++		res = -EFAULT;
++		goto done;
++	}
++	if (down_interruptible(&ip_set_app_mutex)) {
++		res = -EINTR;
++		goto done;
++	}
++
++	op = (unsigned *)data;
++	DP("op=%x", *op);
++	
++	if (*op < IP_SET_OP_VERSION) {
++		/* Check the version at the beginning of operations */
++		struct ip_set_req_version *req_version =
++			(struct ip_set_req_version *) data;
++		if (req_version->version != IP_SET_PROTOCOL_VERSION) {
++			res = -EPROTO;
++			goto done;
++		}
++	}
++
++	switch (*op) {
++	case IP_SET_OP_CREATE:{
++		struct ip_set_req_create *req_create
++			= (struct ip_set_req_create *) data;
++		
++		if (len < sizeof(struct ip_set_req_create)) {
++			ip_set_printk("short CREATE data (want >=%zu, got %u)",
++				      sizeof(struct ip_set_req_create), len);
++			res = -EINVAL;
++			goto done;
++		}
++		req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
++		req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++		res = ip_set_create(req_create->name,
++				    req_create->typename,
++				    IP_SET_INVALID_ID,
++				    data + sizeof(struct ip_set_req_create),
++				    len - sizeof(struct ip_set_req_create));
++		goto done;
++	}
++	case IP_SET_OP_DESTROY:{
++		struct ip_set_req_std *req_destroy
++			= (struct ip_set_req_std *) data;
++		
++		if (len != sizeof(struct ip_set_req_std)) {
++			ip_set_printk("invalid DESTROY data (want %zu, got %u)",
++				      sizeof(struct ip_set_req_std), len);
++			res = -EINVAL;
++			goto done;
++		}
++		if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
++			/* Destroy all sets */
++			index = IP_SET_INVALID_ID;
++		} else {
++			req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
++			index = ip_set_find_byname(req_destroy->name);
++
++			if (index == IP_SET_INVALID_ID) {
++				res = -ENOENT;
++				goto done;
++			}
++		}
++			
++		res = ip_set_destroy(index);
++		goto done;
++	}
++	case IP_SET_OP_FLUSH:{
++		struct ip_set_req_std *req_flush =
++			(struct ip_set_req_std *) data;
++
++		if (len != sizeof(struct ip_set_req_std)) {
++			ip_set_printk("invalid FLUSH data (want %zu, got %u)",
++				      sizeof(struct ip_set_req_std), len);
++			res = -EINVAL;
++			goto done;
++		}
++		if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
++			/* Flush all sets */
++			index = IP_SET_INVALID_ID;
++		} else {
++			req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
++			index = ip_set_find_byname(req_flush->name);
++
++			if (index == IP_SET_INVALID_ID) {
++				res = -ENOENT;
++				goto done;
++			}
++		}
++		res = ip_set_flush(index);
++		goto done;
++	}
++	case IP_SET_OP_RENAME:{
++		struct ip_set_req_create *req_rename
++			= (struct ip_set_req_create *) data;
++
++		if (len != sizeof(struct ip_set_req_create)) {
++			ip_set_printk("invalid RENAME data (want %zu, got %u)",
++				      sizeof(struct ip_set_req_create), len);
++			res = -EINVAL;
++			goto done;
++		}
++
++		req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
++		req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++			
++		index = ip_set_find_byname(req_rename->name);
++		if (index == IP_SET_INVALID_ID) {
++			res = -ENOENT;
++			goto done;
++		}
++		res = ip_set_rename(index, req_rename->typename);
++		goto done;
++	}
++	case IP_SET_OP_SWAP:{
++		struct ip_set_req_create *req_swap
++			= (struct ip_set_req_create *) data;
++		ip_set_id_t to_index;
++
++		if (len != sizeof(struct ip_set_req_create)) {
++			ip_set_printk("invalid SWAP data (want %zu, got %u)",
++				      sizeof(struct ip_set_req_create), len);
++			res = -EINVAL;
++			goto done;
++		}
++
++		req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
++		req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++
++		index = ip_set_find_byname(req_swap->name);
++		if (index == IP_SET_INVALID_ID) {
++			res = -ENOENT;
++			goto done;
++		}
++		to_index = ip_set_find_byname(req_swap->typename);
++		if (to_index == IP_SET_INVALID_ID) {
++			res = -ENOENT;
++			goto done;
++		}
++		res = ip_set_swap(index, to_index);
++		goto done;
++	}
++	default: 
++		break;	/* Set identified by id */
++	}
++	
++	/* There we may have add/del/test/bind/unbind/test_bind operations */
++	if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
++		res = -EBADMSG;
++		goto done;
++	}
++	adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
++
++	if (len < sizeof(struct ip_set_req_adt)) {
++		ip_set_printk("short data in adt request (want >=%zu, got %u)",
++			      sizeof(struct ip_set_req_adt), len);
++		res = -EINVAL;
++		goto done;
++	}
++	req_adt = (struct ip_set_req_adt *) data;
++
++	/* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
++	if (!(*op == IP_SET_OP_UNBIND_SET 
++	      && req_adt->index == IP_SET_INVALID_ID)) {
++		index = ip_set_find_byindex(req_adt->index);
++		if (index == IP_SET_INVALID_ID) {
++			res = -ENOENT;
++			goto done;
++		}
++	}
++	res = adtfn(index, data, len);
++
++    done:
++	up(&ip_set_app_mutex);
++	vfree(data);
++	if (res > 0)
++		res = 0;
++	DP("final result %d", res);
++	return res;
++}
++
++static int 
++ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
++{
++	int res = 0;
++	unsigned *op;
++	ip_set_id_t index = IP_SET_INVALID_ID;
++	void *data;
++	int copylen = *len;
++
++	DP("optval=%d, user=%p, len=%d", optval, user, *len);
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
++	if (optval != SO_IP_SET)
++		return -EBADF;
++	if (*len < sizeof(unsigned)) {
++		ip_set_printk("short userdata (want >=%zu, got %d)",
++			      sizeof(unsigned), *len);
++		return -EINVAL;
++	}
++	data = vmalloc(*len);
++	if (!data) {
++		DP("out of mem for %d bytes", *len);
++		return -ENOMEM;
++	}
++	if (copy_from_user(data, user, *len) != 0) {
++		res = -EFAULT;
++		goto done;
++	}
++	if (down_interruptible(&ip_set_app_mutex)) {
++		res = -EINTR;
++		goto done;
++	}
++
++	op = (unsigned *) data;
++	DP("op=%x", *op);
++
++	if (*op < IP_SET_OP_VERSION) {
++		/* Check the version at the beginning of operations */
++		struct ip_set_req_version *req_version =
++			(struct ip_set_req_version *) data;
++		if (req_version->version != IP_SET_PROTOCOL_VERSION) {
++			res = -EPROTO;
++			goto done;
++		}
++	}
++
++	switch (*op) {
++	case IP_SET_OP_VERSION: {
++		struct ip_set_req_version *req_version =
++		    (struct ip_set_req_version *) data;
++
++		if (*len != sizeof(struct ip_set_req_version)) {
++			ip_set_printk("invalid VERSION (want %zu, got %d)",
++				      sizeof(struct ip_set_req_version),
++				      *len);
++			res = -EINVAL;
++			goto done;
++		}
++
++		req_version->version = IP_SET_PROTOCOL_VERSION;
++		res = copy_to_user(user, req_version,
++				   sizeof(struct ip_set_req_version));
++		goto done;
++	}
++	case IP_SET_OP_GET_BYNAME: {
++		struct ip_set_req_get_set *req_get
++			= (struct ip_set_req_get_set *) data;
++
++		if (*len != sizeof(struct ip_set_req_get_set)) {
++			ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
++				      sizeof(struct ip_set_req_get_set), *len);
++			res = -EINVAL;
++			goto done;
++		}
++		req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++		index = ip_set_find_byname(req_get->set.name);
++		req_get->set.index = index;
++		goto copy;
++	}
++	case IP_SET_OP_GET_BYINDEX: {
++		struct ip_set_req_get_set *req_get
++			= (struct ip_set_req_get_set *) data;
++
++		if (*len != sizeof(struct ip_set_req_get_set)) {
++			ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
++				      sizeof(struct ip_set_req_get_set), *len);
++			res = -EINVAL;
++			goto done;
++		}
++		req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++		index = ip_set_find_byindex(req_get->set.index);
++		strncpy(req_get->set.name,
++			index == IP_SET_INVALID_ID ? ""
++			: ip_set_list[index]->name, IP_SET_MAXNAMELEN);
++		goto copy;
++	}
++	case IP_SET_OP_ADT_GET: {
++		struct ip_set_req_adt_get *req_get
++			= (struct ip_set_req_adt_get *) data;
++
++		if (*len != sizeof(struct ip_set_req_adt_get)) {
++			ip_set_printk("invalid ADT_GET (want %zu, got %d)",
++				      sizeof(struct ip_set_req_adt_get), *len);
++			res = -EINVAL;
++			goto done;
++		}
++		req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++		index = ip_set_find_byname(req_get->set.name);
++		if (index != IP_SET_INVALID_ID) {
++			req_get->set.index = index;
++			strncpy(req_get->typename,
++				ip_set_list[index]->type->typename,
++				IP_SET_MAXNAMELEN - 1);
++		} else {
++			res = -ENOENT;
++			goto done;
++		}
++		goto copy;
++	}
++	case IP_SET_OP_MAX_SETS: {
++		struct ip_set_req_max_sets *req_max_sets
++			= (struct ip_set_req_max_sets *) data;
++		ip_set_id_t i;
++
++		if (*len != sizeof(struct ip_set_req_max_sets)) {
++			ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
++				      sizeof(struct ip_set_req_max_sets), *len);
++			res = -EINVAL;
++			goto done;
++		}
++
++		if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
++			req_max_sets->set.index = IP_SET_INVALID_ID;
++		} else {
++			req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++			req_max_sets->set.index = 
++				ip_set_find_byname(req_max_sets->set.name);
++			if (req_max_sets->set.index == IP_SET_INVALID_ID) {
++				res = -ENOENT;
++				goto done;
++			}
++		}
++		req_max_sets->max_sets = ip_set_max;
++		req_max_sets->sets = 0;
++		for (i = 0; i < ip_set_max; i++) {
++			if (ip_set_list[i] != NULL)
++				req_max_sets->sets++;
++		}
++		goto copy;
++	}
++	case IP_SET_OP_LIST_SIZE: 
++	case IP_SET_OP_SAVE_SIZE: {
++		struct ip_set_req_setnames *req_setnames
++			= (struct ip_set_req_setnames *) data;
++		struct ip_set_name_list *name_list;
++		struct ip_set *set;
++		ip_set_id_t i;
++		int used;
++
++		if (*len < sizeof(struct ip_set_req_setnames)) {
++			ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
++				      sizeof(struct ip_set_req_setnames), *len);
++			res = -EINVAL;
++			goto done;
++		}
++
++		req_setnames->size = 0;
++		used = sizeof(struct ip_set_req_setnames);
++		for (i = 0; i < ip_set_max; i++) {
++			if (ip_set_list[i] == NULL)
++				continue;
++			name_list = (struct ip_set_name_list *) 
++				(data + used);
++			used += sizeof(struct ip_set_name_list);
++			if (used > copylen) {
++				res = -EAGAIN;
++				goto done;
++			}
++			set = ip_set_list[i];
++			/* Fill in index, name, etc. */
++			name_list->index = i;
++			name_list->id = set->id;
++			strncpy(name_list->name,
++				set->name,
++				IP_SET_MAXNAMELEN - 1);
++			strncpy(name_list->typename,
++				set->type->typename,
++				IP_SET_MAXNAMELEN - 1);
++			DP("filled %s of type %s, index %u\n",
++			   name_list->name, name_list->typename,
++			   name_list->index);
++			if (!(req_setnames->index == IP_SET_INVALID_ID
++			      || req_setnames->index == i))
++			      continue;
++			/* Update size */
++			switch (*op) {
++			case IP_SET_OP_LIST_SIZE: {
++				req_setnames->size += sizeof(struct ip_set_list)
++					+ set->type->header_size
++					+ set->type->list_members_size(set);
++				/* Sets are identified by id in the hash */
++				FOREACH_HASH_DO(__set_hash_bindings_size_list, 
++						set->id, &req_setnames->size);
++				break;
++			}
++			case IP_SET_OP_SAVE_SIZE: {
++				req_setnames->size += sizeof(struct ip_set_save)
++					+ set->type->header_size
++					+ set->type->list_members_size(set);
++				FOREACH_HASH_DO(__set_hash_bindings_size_save,
++						set->id, &req_setnames->size);
++				break;
++			}
++			default:
++				break;
++			}
++		}
++		if (copylen != used) {
++			res = -EAGAIN;
++			goto done;
++		}
++		goto copy;
++	}
++	case IP_SET_OP_LIST: {
++		struct ip_set_req_list *req_list
++			= (struct ip_set_req_list *) data;
++		ip_set_id_t i;
++		int used;
++
++		if (*len < sizeof(struct ip_set_req_list)) {
++			ip_set_printk("short LIST (want >=%zu, got %d)",
++				      sizeof(struct ip_set_req_list), *len);
++			res = -EINVAL;
++			goto done;
++		}
++		index = req_list->index;
++		if (index != IP_SET_INVALID_ID
++		    && ip_set_find_byindex(index) != index) {
++		    	res = -ENOENT;
++		    	goto done;
++		}
++		used = 0;
++		if (index == IP_SET_INVALID_ID) {
++			/* List all sets */
++			for (i = 0; i < ip_set_max && res == 0; i++) {
++				if (ip_set_list[i] != NULL)
++					res = ip_set_list_set(i, data, &used, *len);
++			}
++		} else {
++			/* List an individual set */
++			res = ip_set_list_set(index, data, &used, *len);
++		}
++		if (res != 0)
++			goto done;
++		else if (copylen != used) {
++			res = -EAGAIN;
++			goto done;
++		}
++		goto copy;
++	}
++	case IP_SET_OP_SAVE: {
++		struct ip_set_req_list *req_save
++			= (struct ip_set_req_list *) data;
++		ip_set_id_t i;
++		int used;
++
++		if (*len < sizeof(struct ip_set_req_list)) {
++			ip_set_printk("short SAVE (want >=%zu, got %d)",
++				      sizeof(struct ip_set_req_list), *len);
++			res = -EINVAL;
++			goto done;
++		}
++		index = req_save->index;
++		if (index != IP_SET_INVALID_ID
++		    && ip_set_find_byindex(index) != index) {
++		    	res = -ENOENT;
++		    	goto done;
++		}
++		used = 0;
++		if (index == IP_SET_INVALID_ID) {
++			/* Save all sets */
++			for (i = 0; i < ip_set_max && res == 0; i++) {
++				if (ip_set_list[i] != NULL)
++					res = ip_set_save_set(i, data, &used, *len);
++			}
++		} else {
++			/* Save an individual set */
++			res = ip_set_save_set(index, data, &used, *len);
++		}
++		if (res == 0)
++			res = ip_set_save_bindings(index, data, &used, *len);
++			
++		if (res != 0)
++			goto done;
++		else if (copylen != used) {
++			res = -EAGAIN;
++			goto done;
++		}
++		goto copy;
++	}
++	case IP_SET_OP_RESTORE: {
++		struct ip_set_req_setnames *req_restore
++			= (struct ip_set_req_setnames *) data;
++		int line;
++
++		if (*len < sizeof(struct ip_set_req_setnames)
++		    || *len != req_restore->size) {
++			ip_set_printk("invalid RESTORE (want =%zu, got %d)",
++				      req_restore->size, *len);
++			res = -EINVAL;
++			goto done;
++		}
++		line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
++				      req_restore->size - sizeof(struct ip_set_req_setnames));
++		DP("ip_set_restore: %u", line);
++		if (line != 0) {
++			res = -EAGAIN;
++			req_restore->size = line;
++			copylen = sizeof(struct ip_set_req_setnames);
++			goto copy;
++		}
++		goto done;
++	}
++	default:
++		res = -EBADMSG;
++		goto done;
++	}	/* end of switch(op) */
++
++    copy:
++   	DP("set %s, copylen %u", index != IP_SET_INVALID_ID
++   	             		 && ip_set_list[index]
++   	             ? ip_set_list[index]->name
++   	             : ":all:", copylen);
++	res = copy_to_user(user, data, copylen);
++    	
++    done:
++	up(&ip_set_app_mutex);
++	vfree(data);
++	if (res > 0)
++		res = 0;
++	DP("final result %d", res);
++	return res;
++}
++
++static struct nf_sockopt_ops so_set = {
++	.pf 		= PF_INET,
++	.set_optmin 	= SO_IP_SET,
++	.set_optmax 	= SO_IP_SET + 1,
++	.set 		= &ip_set_sockfn_set,
++	.get_optmin 	= SO_IP_SET,
++	.get_optmax	= SO_IP_SET + 1,
++	.get		= &ip_set_sockfn_get,
++	.use		= 0
++};
++
++static int max_sets, hash_size;
++module_param(max_sets, int, 0600);
++MODULE_PARM_DESC(max_sets, "maximal number of sets");
++module_param(hash_size, int, 0600);
++MODULE_PARM_DESC(hash_size, "hash size for bindings");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("module implementing core IP set support");
++
++static int __init init(void)
++{
++	int res;
++	ip_set_id_t i;
++
++	get_random_bytes(&ip_set_hash_random, 4);
++	if (max_sets)
++		ip_set_max = max_sets;
++	ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
++	if (!ip_set_list) {
++		printk(KERN_ERR "Unable to create ip_set_list\n");
++		return -ENOMEM;
++	}
++	memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
++	if (hash_size)
++		ip_set_bindings_hash_size = hash_size;
++	ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
++	if (!ip_set_hash) {
++		printk(KERN_ERR "Unable to create ip_set_hash\n");
++		vfree(ip_set_list);
++		return -ENOMEM;
++	}
++	for (i = 0; i < ip_set_bindings_hash_size; i++)
++		INIT_LIST_HEAD(&ip_set_hash[i]);
++
++	INIT_LIST_HEAD(&set_type_list);
++
++	res = nf_register_sockopt(&so_set);
++	if (res != 0) {
++		ip_set_printk("SO_SET registry failed: %d", res);
++		vfree(ip_set_list);
++		vfree(ip_set_hash);
++		return res;
++	}
++	return 0;
++}
++
++static void __exit fini(void)
++{
++	/* There can't be any existing set or binding */
++	nf_unregister_sockopt(&so_set);
++	vfree(ip_set_list);
++	vfree(ip_set_hash);
++	DP("these are the famous last words");
++}
++
++EXPORT_SYMBOL(ip_set_register_set_type);
++EXPORT_SYMBOL(ip_set_unregister_set_type);
++
++EXPORT_SYMBOL(ip_set_get_byname);
++EXPORT_SYMBOL(ip_set_get_byindex);
++EXPORT_SYMBOL(ip_set_put);
++
++EXPORT_SYMBOL(ip_set_addip_kernel);
++EXPORT_SYMBOL(ip_set_delip_kernel);
++EXPORT_SYMBOL(ip_set_testip_kernel);
++
++module_init(init);
++module_exit(fini);
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_iphash.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iphash.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set_iphash.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iphash.c	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,413 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module implementing an ip hash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_iphash.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++
++static int limit = MAX_RANGE;
++
++static inline __u32
++jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
++{
++	return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
++static inline __u32
++hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++	__u32 id;
++	u_int16_t i;
++	ip_set_ip_t *elem;
++
++	*hash_ip = ip & map->netmask;
++	DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
++	   set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
++	
++	for (i = 0; i < map->probes; i++) {
++		id = jhash_ip(map, i, *hash_ip) % map->hashsize;
++		DP("hash key: %u", id);
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++		if (*elem == *hash_ip)
++			return id;
++		/* No shortcut at testing - there can be deleted
++		 * entries. */
++	}
++	return UINT_MAX;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++       ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_iphash *req = 
++	    (struct ip_set_req_iphash *) data;
++
++	if (size != sizeof(struct ip_set_req_iphash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_iphash),
++			      size);
++		return -EINVAL;
++	}
++	return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set, 
++	      const struct sk_buff *skb,
++	      ip_set_ip_t *hash_ip,
++	      const u_int32_t *flags,
++	      unsigned char index)
++{
++	return __testip(set,
++			ntohl(flags[index] & IPSET_SRC 
++				? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++			hash_ip);
++}
++
++static inline int
++__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	__u32 probe;
++	u_int16_t i;
++	ip_set_ip_t *elem;
++	
++	if (!ip || map->elements > limit)
++		return -ERANGE;
++
++	*hash_ip = ip & map->netmask;
++	
++	for (i = 0; i < map->probes; i++) {
++		probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
++		if (*elem == *hash_ip)
++			return -EEXIST;
++		if (!*elem) {
++			*elem = *hash_ip;
++			map->elements++;
++			return 0;
++		}
++	}
++	/* Trigger rehashing */
++	return -EAGAIN;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++        ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_iphash *req = 
++	    (struct ip_set_req_iphash *) data;
++
++	if (size != sizeof(struct ip_set_req_iphash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_iphash),
++			      size);
++		return -EINVAL;
++	}
++	return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	return __addip((struct ip_set_iphash *) set->data,
++		       ntohl(flags[index] & IPSET_SRC 
++		       		? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       hash_ip);
++}
++
++static int retry(struct ip_set *set)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++	ip_set_ip_t hash_ip, *elem;
++	void *members;
++	u_int32_t i, hashsize = map->hashsize;
++	int res;
++	struct ip_set_iphash *tmp;
++	
++	if (map->resize == 0)
++		return -ERANGE;
++
++    again:
++    	res = 0;
++    	
++	/* Calculate new hash size */
++	hashsize += (hashsize * map->resize)/100;
++	if (hashsize == map->hashsize)
++		hashsize++;
++	
++	ip_set_printk("rehashing of set %s triggered: "
++		      "hashsize grows from %u to %u",
++		      set->name, map->hashsize, hashsize);
++
++	tmp = kmalloc(sizeof(struct ip_set_iphash) 
++		      + map->probes * sizeof(uint32_t), GFP_ATOMIC);
++	if (!tmp) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_iphash)
++		   + map->probes * sizeof(uint32_t));
++		return -ENOMEM;
++	}
++	tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
++	if (!tmp->members) {
++		DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
++		kfree(tmp);
++		return -ENOMEM;
++	}
++	tmp->hashsize = hashsize;
++	tmp->elements = 0;
++	tmp->probes = map->probes;
++	tmp->resize = map->resize;
++	tmp->netmask = map->netmask;
++	memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++	
++	write_lock_bh(&set->lock);
++	map = (struct ip_set_iphash *) set->data; /* Play safe */
++	for (i = 0; i < map->hashsize && res == 0; i++) {
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);	
++		if (*elem)
++			res = __addip(tmp, *elem, &hash_ip);
++	}
++	if (res) {
++		/* Failure, try again */
++		write_unlock_bh(&set->lock);
++		harray_free(tmp->members);
++		kfree(tmp);
++		goto again;
++	}
++	
++	/* Success at resizing! */
++	members = map->members;
++
++	map->hashsize = tmp->hashsize;
++	map->members = tmp->members;
++	write_unlock_bh(&set->lock);
++
++	harray_free(members);
++	kfree(tmp);
++
++	return 0;
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++	ip_set_ip_t id, *elem;
++
++	if (!ip)
++		return -ERANGE;
++
++	id = hash_id(set, ip, hash_ip);
++	if (id == UINT_MAX)
++		return -EEXIST;
++		
++	elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++	*elem = 0;
++	map->elements--;
++
++	return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++        ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_iphash *req =
++	    (struct ip_set_req_iphash *) data;
++
++	if (size != sizeof(struct ip_set_req_iphash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_iphash),
++			      size);
++		return -EINVAL;
++	}
++	return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	return __delip(set,
++		       ntohl(flags[index] & IPSET_SRC 
++				? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       hash_ip);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++	struct ip_set_req_iphash_create *req =
++	    (struct ip_set_req_iphash_create *) data;
++	struct ip_set_iphash *map;
++	uint16_t i;
++
++	if (size != sizeof(struct ip_set_req_iphash_create)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			       sizeof(struct ip_set_req_iphash_create),
++			       size);
++		return -EINVAL;
++	}
++
++	if (req->hashsize < 1) {
++		ip_set_printk("hashsize too small");
++		return -ENOEXEC;
++	}
++
++	if (req->probes < 1) {
++		ip_set_printk("probes too small");
++		return -ENOEXEC;
++	}
++
++	map = kmalloc(sizeof(struct ip_set_iphash) 
++		      + req->probes * sizeof(uint32_t), GFP_KERNEL);
++	if (!map) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_iphash)
++		   + req->probes * sizeof(uint32_t));
++		return -ENOMEM;
++	}
++	for (i = 0; i < req->probes; i++)
++		get_random_bytes(((uint32_t *) map->initval)+i, 4);
++	map->elements = 0;
++	map->hashsize = req->hashsize;
++	map->probes = req->probes;
++	map->resize = req->resize;
++	map->netmask = req->netmask;
++	map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++	if (!map->members) {
++		DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
++		kfree(map);
++		return -ENOMEM;
++	}
++
++	set->data = map;
++	return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++
++	harray_free(map->members);
++	kfree(map);
++
++	set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++	harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
++	map->elements = 0;
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++	struct ip_set_req_iphash_create *header =
++	    (struct ip_set_req_iphash_create *) data;
++
++	header->hashsize = map->hashsize;
++	header->probes = map->probes;
++	header->resize = map->resize;
++	header->netmask = map->netmask;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++
++	return (map->hashsize * sizeof(ip_set_ip_t));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++	ip_set_ip_t i, *elem;
++
++	for (i = 0; i < map->hashsize; i++) {
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);	
++		((ip_set_ip_t *)data)[i] = *elem;
++	}
++}
++
++static struct ip_set_type ip_set_iphash = {
++	.typename		= SETTYPE_NAME,
++	.features		= IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++	.protocol_version	= IP_SET_PROTOCOL_VERSION,
++	.create			= &create,
++	.destroy		= &destroy,
++	.flush			= &flush,
++	.reqsize		= sizeof(struct ip_set_req_iphash),
++	.addip			= &addip,
++	.addip_kernel		= &addip_kernel,
++	.retry			= &retry,
++	.delip			= &delip,
++	.delip_kernel		= &delip_kernel,
++	.testip			= &testip,
++	.testip_kernel		= &testip_kernel,
++	.header_size		= sizeof(struct ip_set_req_iphash_create),
++	.list_header		= &list_header,
++	.list_members_size	= &list_members_size,
++	.list_members		= &list_members,
++	.me			= THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iphash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init init(void)
++{
++	return ip_set_register_set_type(&ip_set_iphash);
++}
++
++static void __exit fini(void)
++{
++	/* FIXME: possible race with ip_set_create() */
++	ip_set_unregister_set_type(&ip_set_iphash);
++}
++
++module_init(init);
++module_exit(fini);
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipmap.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipmap.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipmap.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipmap.c	2007-06-08 16:29:31.833808500 -0500
+@@ -0,0 +1,327 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ *                         Patrick Schaaf <bof@bof.de>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module implementing an IP set type: the single bitmap type */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipmap.h>
++
++static inline ip_set_ip_t
++ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
++{
++	return (ip - map->first_ip)/map->hosts;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++	
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++
++	*hash_ip = ip & map->netmask;
++	DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++	   set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
++	return !!test_bit(ip_to_id(map, *hash_ip), map->members);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++       ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_ipmap *req = 
++	    (struct ip_set_req_ipmap *) data;
++
++	if (size != sizeof(struct ip_set_req_ipmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_ipmap),
++			      size);
++		return -EINVAL;
++	}
++	return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set, 
++	      const struct sk_buff *skb,
++	      ip_set_ip_t *hash_ip,
++	      const u_int32_t *flags,
++	      unsigned char index)
++{
++	int res;
++	
++	DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++	   flags[index] & IPSET_SRC ? "SRC" : "DST",
++	   NIPQUAD(skb->nh.iph->saddr),
++	   NIPQUAD(skb->nh.iph->daddr));
++
++	res =  __testip(set,
++			ntohl(flags[index] & IPSET_SRC 
++				? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++			hash_ip);
++	return (res < 0 ? 0 : res);
++}
++
++static inline int
++__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++
++	*hash_ip = ip & map->netmask;
++	DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++	if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
++		return -EEXIST;
++
++	return 0;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++      ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_ipmap *req = 
++	    (struct ip_set_req_ipmap *) data;
++
++	if (size != sizeof(struct ip_set_req_ipmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_ipmap),
++			      size);
++		return -EINVAL;
++	}
++	DP("%u.%u.%u.%u", HIPQUAD(req->ip));
++	return __addip(set, req->ip, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	return __addip(set,
++		       ntohl(flags[index] & IPSET_SRC 
++		       		? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       hash_ip);
++}
++
++static inline int 
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++
++	*hash_ip = ip & map->netmask;
++	DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++	if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
++		return -EEXIST;
++	
++	return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++      ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_ipmap *req =
++	    (struct ip_set_req_ipmap *) data;
++
++	if (size != sizeof(struct ip_set_req_ipmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_ipmap),
++			      size);
++		return -EINVAL;
++	}
++	return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	return __delip(set,
++		       ntohl(flags[index] & IPSET_SRC 
++				? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       hash_ip);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++	int newbytes;
++	struct ip_set_req_ipmap_create *req =
++	    (struct ip_set_req_ipmap_create *) data;
++	struct ip_set_ipmap *map;
++
++	if (size != sizeof(struct ip_set_req_ipmap_create)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_ipmap_create),
++			      size);
++		return -EINVAL;
++	}
++
++	DP("from %u.%u.%u.%u to %u.%u.%u.%u",
++	   HIPQUAD(req->from), HIPQUAD(req->to));
++
++	if (req->from > req->to) {
++		DP("bad ip range");
++		return -ENOEXEC;
++	}
++
++	map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
++	if (!map) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_ipmap));
++		return -ENOMEM;
++	}
++	map->first_ip = req->from;
++	map->last_ip = req->to;
++	map->netmask = req->netmask;
++
++	if (req->netmask == 0xFFFFFFFF) {
++		map->hosts = 1;
++		map->sizeid = map->last_ip - map->first_ip + 1;
++	} else {
++		unsigned int mask_bits, netmask_bits;
++		ip_set_ip_t mask;
++		
++		map->first_ip &= map->netmask;	/* Should we better bark? */
++		
++		mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
++		netmask_bits = mask_to_bits(map->netmask);
++		
++		if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
++		    || netmask_bits <= mask_bits)
++			return -ENOEXEC;
++
++		DP("mask_bits %u, netmask_bits %u",
++		   mask_bits, netmask_bits);
++		map->hosts = 2 << (32 - netmask_bits - 1);
++		map->sizeid = 2 << (netmask_bits - mask_bits - 1);
++	}
++	if (map->sizeid > MAX_RANGE + 1) {
++		ip_set_printk("range too big (max %d addresses)",
++			       MAX_RANGE+1);
++		kfree(map);
++		return -ENOEXEC;
++	}
++	DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
++	newbytes = bitmap_bytes(0, map->sizeid - 1);
++	map->members = kmalloc(newbytes, GFP_KERNEL);
++	if (!map->members) {
++		DP("out of memory for %d bytes", newbytes);
++		kfree(map);
++		return -ENOMEM;
++	}
++	memset(map->members, 0, newbytes);
++	
++	set->data = map;
++	return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++	
++	kfree(map->members);
++	kfree(map);
++	
++	set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++	memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++	struct ip_set_req_ipmap_create *header =
++	    (struct ip_set_req_ipmap_create *) data;
++
++	header->from = map->first_ip;
++	header->to = map->last_ip;
++	header->netmask = map->netmask;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++	return bitmap_bytes(0, map->sizeid - 1);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++	int bytes = bitmap_bytes(0, map->sizeid - 1);
++
++	memcpy(data, map->members, bytes);
++}
++
++static struct ip_set_type ip_set_ipmap = {
++	.typename		= SETTYPE_NAME,
++	.features		= IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++	.protocol_version	= IP_SET_PROTOCOL_VERSION,
++	.create			= &create,
++	.destroy		= &destroy,
++	.flush			= &flush,
++	.reqsize		= sizeof(struct ip_set_req_ipmap),
++	.addip			= &addip,
++	.addip_kernel		= &addip_kernel,
++	.delip			= &delip,
++	.delip_kernel		= &delip_kernel,
++	.testip			= &testip,
++	.testip_kernel		= &testip_kernel,
++	.header_size		= sizeof(struct ip_set_req_ipmap_create),
++	.list_header		= &list_header,
++	.list_members_size	= &list_members_size,
++	.list_members		= &list_members,
++	.me			= THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipmap type of IP sets");
++
++static int __init init(void)
++{
++	return ip_set_register_set_type(&ip_set_ipmap);
++}
++
++static void __exit fini(void)
++{
++	/* FIXME: possible race with ip_set_create() */
++	ip_set_unregister_set_type(&ip_set_ipmap);
++}
++
++module_init(init);
++module_exit(fini);
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipporthash.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipporthash.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipporthash.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipporthash.c	2007-06-08 16:29:31.833808500 -0500
+@@ -0,0 +1,535 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module implementing an ip+port hash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++
++static int limit = MAX_RANGE;
++
++/* We must handle non-linear skbs */
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++	struct iphdr *iph = skb->nh.iph;
++	u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++
++	switch (iph->protocol) {
++	case IPPROTO_TCP: {
++		struct tcphdr tcph;
++		
++		/* See comments at tcp_match in ip_tables.c */
++		if (offset)
++			return INVALID_PORT;
++
++		if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
++			/* No choice either */
++			return INVALID_PORT;
++	     	
++	     	return ntohs(flags & IPSET_SRC ?
++			     tcph.source : tcph.dest);
++	    }
++	case IPPROTO_UDP: {
++		struct udphdr udph;
++
++		if (offset)
++			return INVALID_PORT;
++
++		if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
++			/* No choice either */
++			return INVALID_PORT;
++	     	
++	     	return ntohs(flags & IPSET_SRC ?
++			     udph.source : udph.dest);
++	    }
++	default:
++		return INVALID_PORT;
++	}
++}
++
++static inline __u32
++jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
++{
++	return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
++#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
++
++static inline __u32
++hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++	ip_set_ip_t *hash_ip)
++{
++	struct ip_set_ipporthash *map = 
++		(struct ip_set_ipporthash *) set->data;
++	__u32 id;
++	u_int16_t i;
++	ip_set_ip_t *elem;
++
++	*hash_ip = HASH_IP(map, ip, port);
++	DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++	   set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++	
++	for (i = 0; i < map->probes; i++) {
++		id = jhash_ip(map, i, *hash_ip) % map->hashsize;
++		DP("hash key: %u", id);
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++		if (*elem == *hash_ip)
++			return id;
++		/* No shortcut at testing - there can be deleted
++		 * entries. */
++	}
++	return UINT_MAX;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++	 ip_set_ip_t *hash_ip)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++	
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++
++	return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++       ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_ipporthash *req = 
++	    (struct ip_set_req_ipporthash *) data;
++
++	if (size != sizeof(struct ip_set_req_ipporthash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_ipporthash),
++			      size);
++		return -EINVAL;
++	}
++	return __testip(set, req->ip, req->port, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set, 
++	      const struct sk_buff *skb,
++	      ip_set_ip_t *hash_ip,
++	      const u_int32_t *flags,
++	      unsigned char index)
++{
++	ip_set_ip_t port;
++
++	if (flags[index+1] == 0)
++		return -EINVAL;
++		
++	port = get_port(skb, flags[index+1]);
++
++	DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++	   flags[index] & IPSET_SRC ? "SRC" : "DST",
++	   NIPQUAD(skb->nh.iph->saddr),
++	   NIPQUAD(skb->nh.iph->daddr));
++	DP("flag %s port %u",
++	   flags[index+1] & IPSET_SRC ? "SRC" : "DST", 
++	   port);	
++	if (port == INVALID_PORT)
++		return 0;	
++
++	return __testip(set,
++			ntohl(flags[index] & IPSET_SRC 
++					? skb->nh.iph->saddr 
++					: skb->nh.iph->daddr),
++			port,
++			hash_ip);
++}
++
++static inline int
++__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
++{
++	__u32 probe;
++	u_int16_t i;
++	ip_set_ip_t *elem;
++
++	for (i = 0; i < map->probes; i++) {
++		probe = jhash_ip(map, i, hash_ip) % map->hashsize;
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
++		if (*elem == hash_ip)
++			return -EEXIST;
++		if (!*elem) {
++			*elem = hash_ip;
++			map->elements++;
++			return 0;
++		}
++	}
++	/* Trigger rehashing */
++	return -EAGAIN;
++}
++
++static inline int
++__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
++	ip_set_ip_t *hash_ip)
++{
++	if (map->elements > limit)
++		return -ERANGE;
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++
++	*hash_ip = HASH_IP(map, ip, port);
++	
++	return __add_haship(map, *hash_ip);
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++        ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_ipporthash *req = 
++	    (struct ip_set_req_ipporthash *) data;
++
++	if (size != sizeof(struct ip_set_req_ipporthash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_ipporthash),
++			      size);
++		return -EINVAL;
++	}
++	return __addip((struct ip_set_ipporthash *) set->data, 
++			req->ip, req->port, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	ip_set_ip_t port;
++
++	if (flags[index+1] == 0)
++		return -EINVAL;
++		
++	port = get_port(skb, flags[index+1]);
++
++	DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++	   flags[index] & IPSET_SRC ? "SRC" : "DST",
++	   NIPQUAD(skb->nh.iph->saddr),
++	   NIPQUAD(skb->nh.iph->daddr));
++	DP("flag %s port %u", 
++	   flags[index+1] & IPSET_SRC ? "SRC" : "DST", 
++	   port);	
++	if (port == INVALID_PORT)
++		return -EINVAL;	
++
++	return __addip((struct ip_set_ipporthash *) set->data,
++		       ntohl(flags[index] & IPSET_SRC 
++		       		? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       port,
++		       hash_ip);
++}
++
++static int retry(struct ip_set *set)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++	ip_set_ip_t *elem;
++	void *members;
++	u_int32_t i, hashsize = map->hashsize;
++	int res;
++	struct ip_set_ipporthash *tmp;
++	
++	if (map->resize == 0)
++		return -ERANGE;
++
++    again:
++    	res = 0;
++    	
++	/* Calculate new hash size */
++	hashsize += (hashsize * map->resize)/100;
++	if (hashsize == map->hashsize)
++		hashsize++;
++	
++	ip_set_printk("rehashing of set %s triggered: "
++		      "hashsize grows from %u to %u",
++		      set->name, map->hashsize, hashsize);
++
++	tmp = kmalloc(sizeof(struct ip_set_ipporthash) 
++		      + map->probes * sizeof(uint32_t), GFP_ATOMIC);
++	if (!tmp) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_ipporthash)
++		   + map->probes * sizeof(uint32_t));
++		return -ENOMEM;
++	}
++	tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
++	if (!tmp->members) {
++		DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
++		kfree(tmp);
++		return -ENOMEM;
++	}
++	tmp->hashsize = hashsize;
++	tmp->elements = 0;
++	tmp->probes = map->probes;
++	tmp->resize = map->resize;
++	tmp->first_ip = map->first_ip;
++	tmp->last_ip = map->last_ip;
++	memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++	
++	write_lock_bh(&set->lock);
++	map = (struct ip_set_ipporthash *) set->data; /* Play safe */
++	for (i = 0; i < map->hashsize && res == 0; i++) {
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);	
++		if (*elem)
++			res = __add_haship(tmp, *elem);
++	}
++	if (res) {
++		/* Failure, try again */
++		write_unlock_bh(&set->lock);
++		harray_free(tmp->members);
++		kfree(tmp);
++		goto again;
++	}
++	
++	/* Success at resizing! */
++	members = map->members;
++
++	map->hashsize = tmp->hashsize;
++	map->members = tmp->members;
++	write_unlock_bh(&set->lock);
++
++	harray_free(members);
++	kfree(tmp);
++
++	return 0;
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++	ip_set_ip_t *hash_ip)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++	ip_set_ip_t id;
++	ip_set_ip_t *elem;
++
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++
++	id = hash_id(set, ip, port, hash_ip);
++
++	if (id == UINT_MAX)
++		return -EEXIST;
++		
++	elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++	*elem = 0;
++	map->elements--;
++
++	return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++        ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_ipporthash *req =
++	    (struct ip_set_req_ipporthash *) data;
++
++	if (size != sizeof(struct ip_set_req_ipporthash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_ipporthash),
++			      size);
++		return -EINVAL;
++	}
++	return __delip(set, req->ip, req->port, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	ip_set_ip_t port;
++
++	if (flags[index+1] == 0)
++		return -EINVAL;
++		
++	port = get_port(skb, flags[index+1]);
++
++	DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++	   flags[index] & IPSET_SRC ? "SRC" : "DST",
++	   NIPQUAD(skb->nh.iph->saddr),
++	   NIPQUAD(skb->nh.iph->daddr));
++	DP("flag %s port %u",
++	   flags[index+1] & IPSET_SRC ? "SRC" : "DST", 
++	   port);	
++	if (port == INVALID_PORT)
++		return -EINVAL;	
++
++	return __delip(set,
++		       ntohl(flags[index] & IPSET_SRC 
++		       		? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       port,
++		       hash_ip);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++	struct ip_set_req_ipporthash_create *req =
++	    (struct ip_set_req_ipporthash_create *) data;
++	struct ip_set_ipporthash *map;
++	uint16_t i;
++
++	if (size != sizeof(struct ip_set_req_ipporthash_create)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			       sizeof(struct ip_set_req_ipporthash_create),
++			       size);
++		return -EINVAL;
++	}
++
++	if (req->hashsize < 1) {
++		ip_set_printk("hashsize too small");
++		return -ENOEXEC;
++	}
++
++	if (req->probes < 1) {
++		ip_set_printk("probes too small");
++		return -ENOEXEC;
++	}
++
++	map = kmalloc(sizeof(struct ip_set_ipporthash) 
++		      + req->probes * sizeof(uint32_t), GFP_KERNEL);
++	if (!map) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_ipporthash)
++		   + req->probes * sizeof(uint32_t));
++		return -ENOMEM;
++	}
++	for (i = 0; i < req->probes; i++)
++		get_random_bytes(((uint32_t *) map->initval)+i, 4);
++	map->elements = 0;
++	map->hashsize = req->hashsize;
++	map->probes = req->probes;
++	map->resize = req->resize;
++	map->first_ip = req->from;
++	map->last_ip = req->to;
++	map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++	if (!map->members) {
++		DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
++		kfree(map);
++		return -ENOMEM;
++	}
++
++	set->data = map;
++	return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++
++	harray_free(map->members);
++	kfree(map);
++
++	set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++	harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
++	map->elements = 0;
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++	struct ip_set_req_ipporthash_create *header =
++	    (struct ip_set_req_ipporthash_create *) data;
++
++	header->hashsize = map->hashsize;
++	header->probes = map->probes;
++	header->resize = map->resize;
++	header->from = map->first_ip;
++	header->to = map->last_ip;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++
++	return (map->hashsize * sizeof(ip_set_ip_t));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++	ip_set_ip_t i, *elem;
++
++	for (i = 0; i < map->hashsize; i++) {
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);	
++		((ip_set_ip_t *)data)[i] = *elem;
++	}
++}
++
++static struct ip_set_type ip_set_ipporthash = {
++	.typename		= SETTYPE_NAME,
++	.features		= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
++	.protocol_version	= IP_SET_PROTOCOL_VERSION,
++	.create			= &create,
++	.destroy		= &destroy,
++	.flush			= &flush,
++	.reqsize		= sizeof(struct ip_set_req_ipporthash),
++	.addip			= &addip,
++	.addip_kernel		= &addip_kernel,
++	.retry			= &retry,
++	.delip			= &delip,
++	.delip_kernel		= &delip_kernel,
++	.testip			= &testip,
++	.testip_kernel		= &testip_kernel,
++	.header_size		= sizeof(struct ip_set_req_ipporthash_create),
++	.list_header		= &list_header,
++	.list_members_size	= &list_members_size,
++	.list_members		= &list_members,
++	.me			= THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipporthash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init init(void)
++{
++	return ip_set_register_set_type(&ip_set_ipporthash);
++}
++
++static void __exit fini(void)
++{
++	/* FIXME: possible race with ip_set_create() */
++	ip_set_unregister_set_type(&ip_set_ipporthash);
++}
++
++module_init(init);
++module_exit(fini);
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_iptree.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iptree.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set_iptree.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iptree.c	2007-06-08 16:29:31.833808500 -0500
+@@ -0,0 +1,571 @@
++/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module implementing an IP set type: the iptree type */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++/* Backward compatibility */
++#ifndef __nocast
++#define __nocast
++#endif
++
++#include <linux/netfilter_ipv4/ip_set_iptree.h>
++
++static int limit = MAX_RANGE;
++
++/* Garbage collection interval in seconds: */
++#define IPTREE_GC_TIME		5*60
++/* Sleep so many milliseconds before trying again 
++ * to delete the gc timer at destroying/flushing a set */ 
++#define IPTREE_DESTROY_SLEEP	100
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++static struct kmem_cache *branch_cachep;
++static struct kmem_cache *leaf_cachep;
++#else
++static kmem_cache_t *branch_cachep;
++static kmem_cache_t *leaf_cachep;
++#endif
++
++#define ABCD(a,b,c,d,addrp) do {		\
++	a = ((unsigned char *)addrp)[3];	\
++	b = ((unsigned char *)addrp)[2];	\
++	c = ((unsigned char *)addrp)[1];	\
++	d = ((unsigned char *)addrp)[0];	\
++} while (0)
++
++#define TESTIP_WALK(map, elem, branch) do {	\
++	if ((map)->tree[elem]) {		\
++		branch = (map)->tree[elem];	\
++	} else 					\
++		return 0;			\
++} while (0)
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_iptreeb *btree;
++	struct ip_set_iptreec *ctree;
++	struct ip_set_iptreed *dtree;
++	unsigned char a,b,c,d;
++
++	if (!ip)
++		return -ERANGE;
++	
++	*hash_ip = ip;
++	ABCD(a, b, c, d, hash_ip);
++	DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
++	TESTIP_WALK(map, a, btree);
++	TESTIP_WALK(btree, b, ctree);
++	TESTIP_WALK(ctree, c, dtree);
++	DP("%lu %lu", dtree->expires[d], jiffies);
++	return !!(map->timeout ? (time_after(dtree->expires[d], jiffies))
++			       : dtree->expires[d]);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++       ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_iptree *req = 
++	    (struct ip_set_req_iptree *) data;
++
++	if (size != sizeof(struct ip_set_req_iptree)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_iptree),
++			      size);
++		return -EINVAL;
++	}
++	return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set, 
++	      const struct sk_buff *skb,
++	      ip_set_ip_t *hash_ip,
++	      const u_int32_t *flags,
++	      unsigned char index)
++{
++	int res;
++	
++	DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++	   flags[index] & IPSET_SRC ? "SRC" : "DST",
++	   NIPQUAD(skb->nh.iph->saddr),
++	   NIPQUAD(skb->nh.iph->daddr));
++
++	res =  __testip(set,
++			ntohl(flags[index] & IPSET_SRC 
++				? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++			hash_ip);
++	return (res < 0 ? 0 : res);
++}
++
++#define ADDIP_WALK(map, elem, branch, type, cachep, flags) do {	\
++	if ((map)->tree[elem]) {				\
++		DP("found %u", elem);				\
++		branch = (map)->tree[elem];			\
++	} else {						\
++		branch = (type *)				\
++			kmem_cache_alloc(cachep, flags);	\
++		if (branch == NULL)				\
++			return -ENOMEM;				\
++		memset(branch, 0, sizeof(*branch));		\
++		(map)->tree[elem] = branch;			\
++		DP("alloc %u", elem);				\
++	}							\
++} while (0)	
++
++static inline int
++__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
++	ip_set_ip_t *hash_ip,
++	unsigned int __nocast flags)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_iptreeb *btree;
++	struct ip_set_iptreec *ctree;
++	struct ip_set_iptreed *dtree;
++	unsigned char a,b,c,d;
++	int ret = 0;
++	
++	if (!ip || map->elements > limit)
++		/* We could call the garbage collector
++		 * but it's probably overkill */
++		return -ERANGE;
++	
++	*hash_ip = ip;
++	ABCD(a, b, c, d, hash_ip);
++	DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
++	ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep, flags);
++	ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep, flags);
++	ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep, flags);
++	if (dtree->expires[d]
++	    && (!map->timeout || time_after(dtree->expires[d], jiffies)))
++	    	ret = -EEXIST;
++	dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
++	/* Lottery */
++	if (dtree->expires[d] == 0)
++		dtree->expires[d] = 1;
++	DP("%u %lu", d, dtree->expires[d]);
++	if (ret == 0)
++		map->elements++;
++	return ret;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++      ip_set_ip_t *hash_ip)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_req_iptree *req = 
++		(struct ip_set_req_iptree *) data;
++
++	if (size != sizeof(struct ip_set_req_iptree)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_iptree),
++			      size);
++		return -EINVAL;
++	}
++	DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
++	return __addip(set, req->ip,
++		       req->timeout ? req->timeout : map->timeout,
++		       hash_ip,
++		       GFP_ATOMIC);
++}
++
++static int
++addip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++	return __addip(set,
++		       ntohl(flags[index] & IPSET_SRC 
++		       		? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       map->timeout,
++		       hash_ip,
++		       GFP_ATOMIC);
++}
++
++#define DELIP_WALK(map, elem, branch) do {	\
++	if ((map)->tree[elem]) {		\
++		branch = (map)->tree[elem];	\
++	} else 					\
++		return -EEXIST;			\
++} while (0)
++
++static inline int 
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_iptreeb *btree;
++	struct ip_set_iptreec *ctree;
++	struct ip_set_iptreed *dtree;
++	unsigned char a,b,c,d;
++	
++	if (!ip)
++		return -ERANGE;
++		
++	*hash_ip = ip;
++	ABCD(a, b, c, d, hash_ip);
++	DELIP_WALK(map, a, btree);
++	DELIP_WALK(btree, b, ctree);
++	DELIP_WALK(ctree, c, dtree);
++
++	if (dtree->expires[d]) {
++		dtree->expires[d] = 0;
++		map->elements--;
++		return 0;
++	}
++	return -EEXIST;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++      ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_iptree *req =
++	    (struct ip_set_req_iptree *) data;
++
++	if (size != sizeof(struct ip_set_req_iptree)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_iptree),
++			      size);
++		return -EINVAL;
++	}
++	return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	return __delip(set,
++		       ntohl(flags[index] & IPSET_SRC 
++		       		? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       hash_ip);
++}
++
++#define LOOP_WALK_BEGIN(map, i, branch) \
++	for (i = 0; i < 256; i++) {	\
++		if (!(map)->tree[i])	\
++			continue;	\
++		branch = (map)->tree[i]
++
++#define LOOP_WALK_END }
++
++static void ip_tree_gc(unsigned long ul_set)
++{
++	struct ip_set *set = (void *) ul_set;
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_iptreeb *btree;
++	struct ip_set_iptreec *ctree;
++	struct ip_set_iptreed *dtree;
++	unsigned int a,b,c,d;
++	unsigned char i,j,k;
++
++	i = j = k = 0;
++	DP("gc: %s", set->name);
++	write_lock_bh(&set->lock);
++	LOOP_WALK_BEGIN(map, a, btree);
++	LOOP_WALK_BEGIN(btree, b, ctree);
++	LOOP_WALK_BEGIN(ctree, c, dtree);
++	for (d = 0; d < 256; d++) {
++		if (dtree->expires[d]) {
++			DP("gc: %u %u %u %u: expires %lu jiffies %lu",
++			    a, b, c, d,
++			    dtree->expires[d], jiffies);
++			if (map->timeout
++			    && time_before(dtree->expires[d], jiffies)) {
++			    	dtree->expires[d] = 0;
++			    	map->elements--;
++			} else
++				k = 1;
++		}
++	}
++	if (k == 0) {
++		DP("gc: %s: leaf %u %u %u empty",
++		    set->name, a, b, c);
++		kmem_cache_free(leaf_cachep, dtree);
++		ctree->tree[c] = NULL;
++	} else {
++		DP("gc: %s: leaf %u %u %u not empty",
++		    set->name, a, b, c);
++		j = 1;
++		k = 0;
++	}
++	LOOP_WALK_END;
++	if (j == 0) {
++		DP("gc: %s: branch %u %u empty",
++		    set->name, a, b);
++		kmem_cache_free(branch_cachep, ctree);
++		btree->tree[b] = NULL;
++	} else {
++		DP("gc: %s: branch %u %u not empty",
++		    set->name, a, b);
++		i = 1;
++		j = k = 0;
++	}
++	LOOP_WALK_END;
++	if (i == 0) {
++		DP("gc: %s: branch %u empty",
++		    set->name, a);
++		kmem_cache_free(branch_cachep, btree);
++		map->tree[a] = NULL;
++	} else {
++		DP("gc: %s: branch %u not empty",
++		    set->name, a);
++		i = j = k = 0;
++	}
++	LOOP_WALK_END;
++	write_unlock_bh(&set->lock);
++	
++	map->gc.expires = jiffies + map->gc_interval * HZ;
++	add_timer(&map->gc);
++}
++
++static inline void init_gc_timer(struct ip_set *set)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++	/* Even if there is no timeout for the entries,
++	 * we still have to call gc because delete
++	 * do not clean up empty branches */
++	map->gc_interval = IPTREE_GC_TIME;
++	init_timer(&map->gc);
++	map->gc.data = (unsigned long) set;
++	map->gc.function = ip_tree_gc;
++	map->gc.expires = jiffies + map->gc_interval * HZ;
++	add_timer(&map->gc);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++	struct ip_set_req_iptree_create *req =
++	    (struct ip_set_req_iptree_create *) data;
++	struct ip_set_iptree *map;
++
++	if (size != sizeof(struct ip_set_req_iptree_create)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_iptree_create),
++			      size);
++		return -EINVAL;
++	}
++
++	map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
++	if (!map) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_iptree));
++		return -ENOMEM;
++	}
++	memset(map, 0, sizeof(*map));
++	map->timeout = req->timeout;
++	map->elements = 0;
++	set->data = map;
++
++	init_gc_timer(set);
++
++	return 0;
++}
++
++static void __flush(struct ip_set_iptree *map)
++{
++	struct ip_set_iptreeb *btree;
++	struct ip_set_iptreec *ctree;
++	struct ip_set_iptreed *dtree;
++	unsigned int a,b,c;
++
++	LOOP_WALK_BEGIN(map, a, btree);
++	LOOP_WALK_BEGIN(btree, b, ctree);
++	LOOP_WALK_BEGIN(ctree, c, dtree);
++	kmem_cache_free(leaf_cachep, dtree);
++	LOOP_WALK_END;
++	kmem_cache_free(branch_cachep, ctree);
++	LOOP_WALK_END;
++	kmem_cache_free(branch_cachep, btree);
++	LOOP_WALK_END;
++	map->elements = 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++	/* gc might be running */
++	while (!del_timer(&map->gc))
++		msleep(IPTREE_DESTROY_SLEEP);
++	__flush(map);
++	kfree(map);
++	set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	unsigned int timeout = map->timeout;
++	
++	/* gc might be running */
++	while (!del_timer(&map->gc))
++		msleep(IPTREE_DESTROY_SLEEP);
++	__flush(map);
++	memset(map, 0, sizeof(*map));
++	map->timeout = timeout;
++
++	init_gc_timer(set);
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_req_iptree_create *header =
++	    (struct ip_set_req_iptree_create *) data;
++
++	header->timeout = map->timeout;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_iptreeb *btree;
++	struct ip_set_iptreec *ctree;
++	struct ip_set_iptreed *dtree;
++	unsigned int a,b,c,d;
++	unsigned int count = 0;
++
++	LOOP_WALK_BEGIN(map, a, btree);
++	LOOP_WALK_BEGIN(btree, b, ctree);
++	LOOP_WALK_BEGIN(ctree, c, dtree);
++	for (d = 0; d < 256; d++) {
++		if (dtree->expires[d]
++		    && (!map->timeout || time_after(dtree->expires[d], jiffies)))
++		    	count++;
++	}
++	LOOP_WALK_END;
++	LOOP_WALK_END;
++	LOOP_WALK_END;
++
++	DP("members %u", count);
++	return (count * sizeof(struct ip_set_req_iptree));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_iptreeb *btree;
++	struct ip_set_iptreec *ctree;
++	struct ip_set_iptreed *dtree;
++	unsigned int a,b,c,d;
++	size_t offset = 0;
++	struct ip_set_req_iptree *entry;
++
++	LOOP_WALK_BEGIN(map, a, btree);
++	LOOP_WALK_BEGIN(btree, b, ctree);
++	LOOP_WALK_BEGIN(ctree, c, dtree);
++	for (d = 0; d < 256; d++) {
++		if (dtree->expires[d]
++		    && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
++		    	entry = (struct ip_set_req_iptree *)(data + offset);
++		    	entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
++		    	entry->timeout = !map->timeout ? 0 
++				: (dtree->expires[d] - jiffies)/HZ;
++			offset += sizeof(struct ip_set_req_iptree);
++		}
++	}
++	LOOP_WALK_END;
++	LOOP_WALK_END;
++	LOOP_WALK_END;
++}
++
++static struct ip_set_type ip_set_iptree = {
++	.typename		= SETTYPE_NAME,
++	.features		= IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++	.protocol_version	= IP_SET_PROTOCOL_VERSION,
++	.create			= &create,
++	.destroy		= &destroy,
++	.flush			= &flush,
++	.reqsize		= sizeof(struct ip_set_req_iptree),
++	.addip			= &addip,
++	.addip_kernel		= &addip_kernel,
++	.delip			= &delip,
++	.delip_kernel		= &delip_kernel,
++	.testip			= &testip,
++	.testip_kernel		= &testip_kernel,
++	.header_size		= sizeof(struct ip_set_req_iptree_create),
++	.list_header		= &list_header,
++	.list_members_size	= &list_members_size,
++	.list_members		= &list_members,
++	.me			= THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iptree type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init init(void)
++{
++	int ret;
++	
++	branch_cachep = kmem_cache_create("ip_set_iptreeb",
++				sizeof(struct ip_set_iptreeb),
++				0, 0, NULL, NULL);
++	if (!branch_cachep) {
++		printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
++		ret = -ENOMEM;
++		goto out;
++	}
++	leaf_cachep = kmem_cache_create("ip_set_iptreed",
++				sizeof(struct ip_set_iptreed),
++				0, 0, NULL, NULL);
++	if (!leaf_cachep) {
++		printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
++		ret = -ENOMEM;
++		goto free_branch;
++	}
++	ret = ip_set_register_set_type(&ip_set_iptree);
++	if (ret == 0)
++		goto out;
++
++	kmem_cache_destroy(leaf_cachep);
++    free_branch:	
++	kmem_cache_destroy(branch_cachep);
++    out:
++	return ret;
++}
++
++static void __exit fini(void)
++{
++	/* FIXME: possible race with ip_set_create() */
++	ip_set_unregister_set_type(&ip_set_iptree);
++	kmem_cache_destroy(leaf_cachep);
++	kmem_cache_destroy(branch_cachep);
++}
++
++module_init(init);
++module_exit(fini);
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_macipmap.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_macipmap.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set_macipmap.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_macipmap.c	2007-06-08 16:29:31.833808500 -0500
+@@ -0,0 +1,353 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ *                         Patrick Schaaf <bof@bof.de>
++ *                         Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module implementing an IP set type: the macipmap type */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/if_ether.h>
++#include <linux/vmalloc.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_macipmap.h>
++
++static int
++testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
++	struct ip_set_macip *table = (struct ip_set_macip *) map->members;	
++	struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
++
++	if (size != sizeof(struct ip_set_req_macipmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_macipmap),
++			      size);
++		return -EINVAL;
++	}
++
++	if (req->ip < map->first_ip || req->ip > map->last_ip)
++		return -ERANGE;
++
++	*hash_ip = req->ip;
++	DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++	   set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));		
++	if (test_bit(IPSET_MACIP_ISSET,
++		     (void *) &table[req->ip - map->first_ip].flags)) {
++		return (memcmp(req->ethernet,
++			       &table[req->ip - map->first_ip].ethernet,
++			       ETH_ALEN) == 0);
++	} else {
++		return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
++	}
++}
++
++static int
++testip_kernel(struct ip_set *set, 
++	      const struct sk_buff *skb,
++	      ip_set_ip_t *hash_ip,
++	      const u_int32_t *flags,
++	      unsigned char index)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++	struct ip_set_macip *table =
++	    (struct ip_set_macip *) map->members;
++	ip_set_ip_t ip;
++	
++	ip = ntohl(flags[index] & IPSET_SRC
++			? skb->nh.iph->saddr
++			: skb->nh.iph->daddr);
++	DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++	   flags[index] & IPSET_SRC ? "SRC" : "DST",
++	   NIPQUAD(skb->nh.iph->saddr),
++	   NIPQUAD(skb->nh.iph->daddr));
++
++	if (ip < map->first_ip || ip > map->last_ip)
++		return 0;
++
++	*hash_ip = ip;	
++	DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++	   set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));		
++	if (test_bit(IPSET_MACIP_ISSET,
++	    (void *) &table[ip - map->first_ip].flags)) {
++		/* Is mac pointer valid?
++		 * If so, compare... */
++		return (skb->mac.raw >= skb->head
++			&& (skb->mac.raw + ETH_HLEN) <= skb->data
++			&& (memcmp(eth_hdr(skb)->h_source,
++				   &table[ip - map->first_ip].ethernet,
++				   ETH_ALEN) == 0));
++	} else {
++		return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
++	}
++}
++
++/* returns 0 on success */
++static inline int
++__addip(struct ip_set *set, 
++	ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++	struct ip_set_macip *table =
++	    (struct ip_set_macip *) map->members;
++
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++	if (test_and_set_bit(IPSET_MACIP_ISSET, 
++			     (void *) &table[ip - map->first_ip].flags))
++		return -EEXIST;
++
++	*hash_ip = ip;
++	DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++	memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
++	return 0;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++      ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_macipmap *req =
++	    (struct ip_set_req_macipmap *) data;
++
++	if (size != sizeof(struct ip_set_req_macipmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_macipmap),
++			      size);
++		return -EINVAL;
++	}
++	return __addip(set, req->ip, req->ethernet, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	ip_set_ip_t ip;
++	
++	ip = ntohl(flags[index] & IPSET_SRC
++			? skb->nh.iph->saddr
++			: skb->nh.iph->daddr);
++
++	if (!(skb->mac.raw >= skb->head
++	      && (skb->mac.raw + ETH_HLEN) <= skb->data))
++		return -EINVAL;
++
++	return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++	struct ip_set_macip *table =
++	    (struct ip_set_macip *) map->members;
++
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++	if (!test_and_clear_bit(IPSET_MACIP_ISSET, 
++				(void *)&table[ip - map->first_ip].flags))
++		return -EEXIST;
++
++	*hash_ip = ip;
++	DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++	return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++     ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_macipmap *req =
++	    (struct ip_set_req_macipmap *) data;
++
++	if (size != sizeof(struct ip_set_req_macipmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_macipmap),
++			      size);
++		return -EINVAL;
++	}
++	return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	return __delip(set,
++		       ntohl(flags[index] & IPSET_SRC 
++		       		? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       hash_ip);
++}
++
++static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
++{
++	return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++	int newbytes;
++	struct ip_set_req_macipmap_create *req =
++	    (struct ip_set_req_macipmap_create *) data;
++	struct ip_set_macipmap *map;
++
++	if (size != sizeof(struct ip_set_req_macipmap_create)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_macipmap_create),
++			      size);
++		return -EINVAL;
++	}
++
++	DP("from %u.%u.%u.%u to %u.%u.%u.%u",
++	   HIPQUAD(req->from), HIPQUAD(req->to));
++
++	if (req->from > req->to) {
++		DP("bad ip range");
++		return -ENOEXEC;
++	}
++
++	if (req->to - req->from > MAX_RANGE) {
++		ip_set_printk("range too big (max %d addresses)",
++			       MAX_RANGE+1);
++		return -ENOEXEC;
++	}
++
++	map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
++	if (!map) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_macipmap));
++		return -ENOMEM;
++	}
++	map->flags = req->flags;
++	map->first_ip = req->from;
++	map->last_ip = req->to;
++	newbytes = members_size(map->first_ip, map->last_ip);
++	map->members = ip_set_malloc(newbytes);
++	DP("members: %u %p", newbytes, map->members);
++	if (!map->members) {
++		DP("out of memory for %d bytes", newbytes);
++		kfree(map);
++		return -ENOMEM;
++	}
++	memset(map->members, 0, newbytes);
++	
++	set->data = map;
++	return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++
++	ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
++	kfree(map);
++
++	set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++	memset(map->members, 0, members_size(map->first_ip, map->last_ip));
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++	struct ip_set_req_macipmap_create *header =
++	    (struct ip_set_req_macipmap_create *) data;
++
++	DP("list_header %x %x %u", map->first_ip, map->last_ip,
++	   map->flags);
++
++	header->from = map->first_ip;
++	header->to = map->last_ip;
++	header->flags = map->flags;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++
++	DP("%u", members_size(map->first_ip, map->last_ip));
++	return members_size(map->first_ip, map->last_ip);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++
++	int bytes = members_size(map->first_ip, map->last_ip);
++
++	DP("members: %u %p", bytes, map->members);
++	memcpy(data, map->members, bytes);
++}
++
++static struct ip_set_type ip_set_macipmap = {
++	.typename		= SETTYPE_NAME,
++	.features		= IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++	.protocol_version	= IP_SET_PROTOCOL_VERSION,
++	.create			= &create,
++	.destroy		= &destroy,
++	.flush			= &flush,
++	.reqsize		= sizeof(struct ip_set_req_macipmap),
++	.addip			= &addip,
++	.addip_kernel		= &addip_kernel,
++	.delip			= &delip,
++	.delip_kernel		= &delip_kernel,
++	.testip			= &testip,
++	.testip_kernel		= &testip_kernel,
++	.header_size		= sizeof(struct ip_set_req_macipmap_create),
++	.list_header		= &list_header,
++	.list_members_size	= &list_members_size,
++	.list_members		= &list_members,
++	.me			= THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("macipmap type of IP sets");
++
++static int __init init(void)
++{
++	init_max_malloc_size();
++	return ip_set_register_set_type(&ip_set_macipmap);
++}
++
++static void __exit fini(void)
++{
++	/* FIXME: possible race with ip_set_create() */
++	ip_set_unregister_set_type(&ip_set_macipmap);
++}
++
++module_init(init);
++module_exit(fini);
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_nethash.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_nethash.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set_nethash.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_nethash.c	2007-06-08 16:29:31.833808500 -0500
+@@ -0,0 +1,481 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module implementing a cidr nethash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_nethash.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++
++static int limit = MAX_RANGE;
++
++static inline __u32
++jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
++{
++	return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
++static inline __u32
++hash_id_cidr(struct ip_set_nethash *map,
++	     ip_set_ip_t ip,
++	     unsigned char cidr,
++	     ip_set_ip_t *hash_ip)
++{
++	__u32 id;
++	u_int16_t i;
++	ip_set_ip_t *elem;
++
++	*hash_ip = pack(ip, cidr);
++	
++	for (i = 0; i < map->probes; i++) {
++		id = jhash_ip(map, i, *hash_ip) % map->hashsize;